author    Sverker Eriksson <[email protected]>  2019-02-06 19:10:26 +0100
committer Sverker Eriksson <[email protected]>  2019-02-06 19:10:26 +0100
commit    98cfd6016f8b40fc97e03b31177d14318349040f (patch)
tree      c0fcdd768071c36bfbcbf186d369d9ca14c47421 /erts/emulator/beam
parent    e2ca71b6e7172b320b5b171359d53a161383fb19 (diff)
parent    3825199794da28d79b21052a2e69e2335921d55e (diff)
Merge tag 'OTP-21.2' into sverker/map-from-ks-vs-bug
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/arith_instrs.tab | 396
-rw-r--r--  erts/emulator/beam/atom.c | 99
-rw-r--r--  erts/emulator/beam/atom.h | 14
-rw-r--r--  erts/emulator/beam/atom.names | 101
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 950
-rw-r--r--  erts/emulator/beam/beam_bp.c | 611
-rw-r--r--  erts/emulator/beam/beam_bp.h | 66
-rw-r--r--  erts/emulator/beam/beam_debug.c | 817
-rw-r--r--  erts/emulator/beam/beam_emu.c | 5346
-rw-r--r--  erts/emulator/beam/beam_load.c | 1526
-rw-r--r--  erts/emulator/beam/beam_load.h | 55
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 76
-rw-r--r--  erts/emulator/beam/bif.c | 2929
-rw-r--r--  erts/emulator/beam/bif.h | 205
-rw-r--r--  erts/emulator/beam/bif.tab | 164
-rw-r--r--  erts/emulator/beam/bif_instrs.tab | 547
-rw-r--r--  erts/emulator/beam/big.c | 53
-rw-r--r--  erts/emulator/beam/big.h | 19
-rw-r--r--  erts/emulator/beam/binary.c | 130
-rw-r--r--  erts/emulator/beam/break.c | 476
-rw-r--r--  erts/emulator/beam/bs_instrs.tab | 1038
-rw-r--r--  erts/emulator/beam/code_ix.c | 37
-rw-r--r--  erts/emulator/beam/code_ix.h | 92
-rw-r--r--  erts/emulator/beam/copy.c | 170
-rw-r--r--  erts/emulator/beam/dist.c | 3822
-rw-r--r--  erts/emulator/beam/dist.h | 235
-rw-r--r--  erts/emulator/beam/dtrace-wrapper.h | 4
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 8
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 618
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 162
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 140
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 2652
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 248
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 315
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 18
-rw-r--r--  erts/emulator/beam/erl_arith.c | 22
-rw-r--r--  erts/emulator/beam/erl_async.c | 90
-rw-r--r--  erts/emulator/beam/erl_async.h | 29
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 8
-rw-r--r--  erts/emulator/beam/erl_bif_atomics.c | 256
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 1383
-rw-r--r--  erts/emulator/beam/erl_bif_chksum.c | 10
-rw-r--r--  erts/emulator/beam/erl_bif_counters.c | 255
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 184
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 72
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 3297
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 982
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 184
-rw-r--r--  erts/emulator/beam/erl_bif_persistent.c | 983
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 275
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 75
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 581
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 322
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h | 346
-rw-r--r--  erts/emulator/beam/erl_binary.h | 261
-rw-r--r--  erts/emulator/beam/erl_bits.c | 80
-rw-r--r--  erts/emulator/beam/erl_bits.h | 61
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 165
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.h | 12
-rw-r--r--  erts/emulator/beam/erl_db.c | 2879
-rw-r--r--  erts/emulator/beam/erl_db.h | 70
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 3138
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 61
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 620
-rw-r--r--  erts/emulator/beam/erl_db_tree.h | 4
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 513
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 175
-rw-r--r--  erts/emulator/beam/erl_debug.c | 48
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 85
-rw-r--r--  erts/emulator/beam/erl_driver.h | 52
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 46
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 210
-rw-r--r--  erts/emulator/beam/erl_fun.c | 60
-rw-r--r--  erts/emulator/beam/erl_fun.h | 14
-rw-r--r--  erts/emulator/beam/erl_gc.c | 1082
-rw-r--r--  erts/emulator/beam/erl_gc.h | 119
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 20
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 2004
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h | 18
-rw-r--r--  erts/emulator/beam/erl_init.c | 549
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 1277
-rw-r--r--  erts/emulator/beam/erl_instrument.h | 42
-rw-r--r--  erts/emulator/beam/erl_io_queue.c | 1240
-rw-r--r--  erts/emulator/beam/erl_io_queue.h | 201
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 1096
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 60
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 947
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 1013
-rw-r--r--  erts/emulator/beam/erl_lock_flags.c | 59
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 78
-rw-r--r--  erts/emulator/beam/erl_map.c | 493
-rw-r--r--  erts/emulator/beam/erl_map.h | 5
-rw-r--r--  erts/emulator/beam/erl_math.c | 13
-rw-r--r--  erts/emulator/beam/erl_message.c | 709
-rw-r--r--  erts/emulator/beam/erl_message.h | 345
-rw-r--r--  erts/emulator/beam/erl_monitor_link.c | 1326
-rw-r--r--  erts/emulator/beam/erl_monitor_link.h | 2354
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 1047
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 182
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 56
-rw-r--r--  erts/emulator/beam/erl_msacc.h | 59
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 37
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.c | 180
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h | 328
-rw-r--r--  erts/emulator/beam/erl_nif.c | 2707
-rw-r--r--  erts/emulator/beam/erl_nif.h | 91
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 64
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 41
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 1204
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 160
-rw-r--r--  erts/emulator/beam/erl_port.h | 274
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 456
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 78
-rw-r--r--  erts/emulator/beam/erl_posix_str.c | 6
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 20
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c | 4808
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h | 1051
-rw-r--r--  erts/emulator/beam/erl_process.c | 8062
-rw-r--r--  erts/emulator/beam/erl_process.h | 951
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 248
-rw-r--r--  erts/emulator/beam/erl_process_dict.h | 11
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 738
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 363
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 370
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 153
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 66
-rw-r--r--  erts/emulator/beam/erl_rbtree.h | 247
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 49
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h | 26
-rw-r--r--  erts/emulator/beam/erl_smp.h | 1640
-rw-r--r--  erts/emulator/beam/erl_term.c | 42
-rw-r--r--  erts/emulator/beam/erl_term.h | 321
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 71
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 50
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c | 127
-rw-r--r--  erts/emulator/beam/erl_thr_queue.h | 27
-rw-r--r--  erts/emulator/beam/erl_threads.h | 1486
-rw-r--r--  erts/emulator/beam/erl_time.h | 98
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 345
-rw-r--r--  erts/emulator/beam/erl_trace.c | 631
-rw-r--r--  erts/emulator/beam/erl_trace.h | 28
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 375
-rw-r--r--  erts/emulator/beam/erl_unicode.h | 5
-rw-r--r--  erts/emulator/beam/erl_utils.h | 122
-rw-r--r--  erts/emulator/beam/erl_vm.h | 59
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d | 83
-rw-r--r--  erts/emulator/beam/erlang_lttng.h | 17
-rw-r--r--  erts/emulator/beam/error.h | 91
-rw-r--r--  erts/emulator/beam/export.c | 75
-rw-r--r--  erts/emulator/beam/export.h | 28
-rw-r--r--  erts/emulator/beam/external.c | 864
-rw-r--r--  erts/emulator/beam/external.h | 60
-rw-r--r--  erts/emulator/beam/float_instrs.tab | 88
-rw-r--r--  erts/emulator/beam/global.h | 495
-rw-r--r--  erts/emulator/beam/hash.c | 8
-rw-r--r--  erts/emulator/beam/hash.h | 4
-rw-r--r--  erts/emulator/beam/index.c | 15
-rw-r--r--  erts/emulator/beam/index.h | 18
-rw-r--r--  erts/emulator/beam/instrs.tab | 973
-rw-r--r--  erts/emulator/beam/io.c | 2709
-rw-r--r--  erts/emulator/beam/lttng-wrapper.h | 6
-rw-r--r--  erts/emulator/beam/macros.tab | 173
-rw-r--r--  erts/emulator/beam/map_instrs.tab | 159
-rw-r--r--  erts/emulator/beam/module.c | 89
-rw-r--r--  erts/emulator/beam/module.h | 26
-rw-r--r--  erts/emulator/beam/msg_instrs.tab | 403
-rw-r--r--  erts/emulator/beam/ops.tab | 754
-rw-r--r--  erts/emulator/beam/packet_parser.c | 10
-rw-r--r--  erts/emulator/beam/register.c | 121
-rw-r--r--  erts/emulator/beam/register.h | 2
-rw-r--r--  erts/emulator/beam/safe_hash.c | 78
-rw-r--r--  erts/emulator/beam/safe_hash.h | 17
-rw-r--r--  erts/emulator/beam/select_instrs.tab | 190
-rw-r--r--  erts/emulator/beam/sys.h | 391
-rw-r--r--  erts/emulator/beam/time.c | 1527
-rw-r--r--  erts/emulator/beam/trace_instrs.tab | 168
-rw-r--r--  erts/emulator/beam/utils.c | 995
178 files changed, 57628 insertions, 39668 deletions
diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab
new file mode 100644
index 0000000000..b828e86788
--- /dev/null
+++ b/erts/emulator/beam/arith_instrs.tab
@@ -0,0 +1,396 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+OUTLINED_ARITH_2(Fail, Live, Name, BIF, Op1, Op2, Dst) {
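+ /*
+ * The operands are parked in the first two x registers beyond the
+ * live ones: the erts_gc_$Name helper treats them as extra GC roots
+ * (rewriting them if a collection moves the terms), and the error
+ * path below reads them back out of reg[].
+ */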
+ Eterm result;
+ Uint live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = $Op1;
+ reg[live+1] = $Op2;
+ result = erts_gc_$Name (c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ $BIF_ERROR_ARITY_2($Fail, $BIF, reg[live], reg[live+1]);
+}
+
+
+i_plus := plus.fetch.execute;
+
+plus.head() {
+ Eterm PlusOp1, PlusOp2;
+}
+
+plus.fetch(Op1, Op2) {
+ PlusOp1 = $Op1;
+ PlusOp2 = $Op2;
+}
+
+plus.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(PlusOp1, PlusOp2))) {
+ Sint i = signed_val(PlusOp1) + signed_val(PlusOp2);
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst);
+}
+
+i_minus := minus.fetch.execute;
+
+minus.head() {
+ Eterm MinusOp1, MinusOp2;
+}
+
+minus.fetch(Op1, Op2) {
+ MinusOp1 = $Op1;
+ MinusOp2 = $Op2;
+}
+
+minus.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(MinusOp1, MinusOp2))) {
+ Sint i = signed_val(MinusOp1) - signed_val(MinusOp2);
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst);
+}
+
+i_increment := increment.fetch.execute;
+
+increment.head() {
+ Eterm increment_reg_val;
+}
+
+increment.fetch(Src) {
+ increment_reg_val = $Src;
+}
+
+increment.execute(IncrementVal, Live, Dst) {
+ Eterm increment_val = $IncrementVal;
+ Uint live;
+ Eterm result;
+
+ if (ERTS_LIKELY(is_small(increment_reg_val))) {
+ Sint i = signed_val(increment_reg_val) + increment_val;
+ if (ERTS_LIKELY(IS_SSMALL(i))) {
+ $Dst = make_small(i);
+ $NEXT0();
+ }
+ }
+ live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = increment_reg_val;
+ reg[live+1] = make_small(increment_val);
+ result = erts_gc_mixed_plus(c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
+ goto find_func_info;
+}
+
+i_times(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_times, BIF_stimes_2, op1, op2, $Dst);
+}
+
+i_m_div(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ $OUTLINED_ARITH_2($Fail, $Live, mixed_div, BIF_div_2, op1, op2, $Dst);
+}
+
+i_int_div(Fail, Live, Op1, Op2, Dst) {
+ Eterm op1 = $Op1;
+ Eterm op2 = $Op2;
+ if (ERTS_UNLIKELY(op2 == SMALL_ZERO)) {
+ c_p->freason = BADARITH;
+ $BIF_ERROR_ARITY_2($Fail, BIF_intdiv_2, op1, op2);
+ } else if (ERTS_LIKELY(is_both_small(op1, op2))) {
+ Sint ires = signed_val(op1) / signed_val(op2);
+ if (ERTS_LIKELY(IS_SSMALL(ires))) {
+ $Dst = make_small(ires);
+ $NEXT0();
+ }
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, int_div, BIF_intdiv_2, op1, op2, $Dst);
+}
+
+i_rem := rem.fetch.execute;
+
+rem.head() {
+ Eterm RemOp1, RemOp2;
+}
+
+rem.fetch(Src1, Src2) {
+ RemOp1 = $Src1;
+ RemOp2 = $Src2;
+}
+
+rem.execute(Fail, Live, Dst) {
+ if (ERTS_UNLIKELY(RemOp2 == SMALL_ZERO)) {
+ c_p->freason = BADARITH;
+ $BIF_ERROR_ARITY_2($Fail, BIF_rem_2, RemOp1, RemOp2);
+ } else if (ERTS_LIKELY(is_both_small(RemOp1, RemOp2))) {
+ $Dst = make_small(signed_val(RemOp1) % signed_val(RemOp2));
+ $NEXT0();
+ } else {
+ $OUTLINED_ARITH_2($Fail, $Live, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst);
+ }
+}
+
+i_band := band.fetch.execute;
+
+band.head() {
+ Eterm BandOp1, BandOp2;
+}
+
+band.fetch(Src1, Src2) {
+ BandOp1 = $Src1;
+ BandOp2 = $Src2;
+}
+
+band.execute(Fail, Live, Dst) {
+ if (ERTS_LIKELY(is_both_small(BandOp1, BandOp2))) {
+ /*
+ * No need to untag -- TAG & TAG == TAG.
+ */
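+ /*
+ * Sketch, assuming the usual 4-bit small tag 0xF, so that
+ * make_small(X) == (X << 4) | 0xF:
+ *
+ * ((A << 4) | 0xF) & ((B << 4) | 0xF)
+ * == ((A & B) << 4) | 0xF
+ * == make_small(A & B)
+ */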
+ $Dst = BandOp1 & BandOp2;
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, band, BIF_band_2, BandOp1, BandOp2, $Dst);
+}
+
+i_bor(Fail, Live, Src1, Src2, Dst) {
+ if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
+ /*
+ * No need to untag -- TAG | TAG == TAG.
+ */
+ $Dst = $Src1 | $Src2;
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, bor, BIF_bor_2, $Src1, $Src2, $Dst);
+}
+
+i_bxor(Fail, Live, Src1, Src2, Dst) {
+ if (ERTS_LIKELY(is_both_small($Src1, $Src2))) {
+ /*
+ * TAG ^ TAG == 0.
+ *
+ * Therefore, we perform the XOR operation on the tagged values,
+ * and OR in the tag bits.
+ */
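+ /*
+ * Sketch, under the same 4-bit tag assumption:
+ *
+ * ((A << 4) | 0xF) ^ ((B << 4) | 0xF) == (A ^ B) << 4
+ *
+ * The tag is zeroed out, so OR-ing in make_small(0) (which is
+ * nothing but the tag bits) retags the result as a small.
+ */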
+ $Dst = ($Src1 ^ $Src2) | make_small(0);
+ $NEXT0();
+ }
+ $OUTLINED_ARITH_2($Fail, $Live, bxor, BIF_bxor_2, $Src1, $Src2, $Dst);
+}
+
+i_bsl := shift.setup_bsl.execute;
+i_bsr := shift.setup_bsr.execute;
+
+shift.head() {
+ Eterm Op1, Op2;
+ Sint shift_left_count;
+}
+
+shift.setup_bsr(Src1, Src2) {
+ Op1 = $Src1;
+ Op2 = $Src2;
+ shift_left_count = 0;
+ if (ERTS_LIKELY(is_small(Op2))) {
+ shift_left_count = -signed_val(Op2);
+ } else if (is_big(Op2)) {
+ /*
+ * N bsr NegativeBigNum == N bsl MAX_SMALL
+ * N bsr PositiveBigNum == N bsl MIN_SMALL
+ */
+ shift_left_count = make_small(bignum_header_is_neg(*big_val(Op2)) ?
+ MAX_SMALL : MIN_SMALL);
+ }
+}
+
+shift.setup_bsl(Src1, Src2) {
+ Op1 = $Src1;
+ Op2 = $Src2;
+ shift_left_count = 0;
+ if (ERTS_LIKELY(is_small(Op2))) {
+ shift_left_count = signed_val(Op2);
+ } else if (is_big(Op2)) {
+ if (bignum_header_is_neg(*big_val(Op2))) {
+ /*
+ * N bsl NegativeBigNum is either 0 or -1, depending on
+ * the sign of N. Since we don't believe this case
+ * is common, do the calculation with the minimum
+ * amount of code.
+ */
+ shift_left_count = MIN_SMALL;
+ } else if (is_integer(Op1)) {
+ /*
+ * N bsl PositiveBigNum is too large to represent.
+ */
+ shift_left_count = MAX_SMALL;
+ }
+ }
+}
+
+shift.execute(Fail, Live, Dst) {
+ Uint big_words_needed;
+
+ if (ERTS_LIKELY(is_small(Op1))) {
+ Sint int_res = signed_val(Op1);
+ if (ERTS_UNLIKELY(shift_left_count == 0 || int_res == 0)) {
+ if (ERTS_UNLIKELY(is_not_integer(Op2))) {
+ goto shift_error;
+ }
+ if (int_res == 0) {
+ $Dst = Op1;
+ $NEXT0();
+ }
+ } else if (shift_left_count < 0) { /* Right shift */
+ Eterm bsr_res;
+ shift_left_count = -shift_left_count;
+ if (shift_left_count >= SMALL_BITS-1) {
+ bsr_res = (int_res < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
+ } else {
+ bsr_res = make_small(int_res >> shift_left_count);
+ }
+ $Dst = bsr_res;
+ $NEXT0();
+ } else if (shift_left_count < SMALL_BITS-1) { /* Left shift */
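+ /*
+ * The mask covers every bit position that the shift would move
+ * out of the small range; the result fits iff all of them equal
+ * the sign of int_res -- all 0 for a non-negative value, all 1
+ * (hence the ~int_res test) for a negative one.
+ */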
+ if ((int_res > 0 &&
+ ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & int_res) == 0) ||
+ ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & ~int_res) == 0) {
+ $Dst = make_small(int_res << shift_left_count);
+ $NEXT0();
+ }
+ }
+ big_words_needed = 1; /* big_size(small_to_big(Op1)) */
+ goto big_shift;
+ } else if (is_big(Op1)) {
+ if (shift_left_count == 0) {
+ if (is_not_integer(Op2)) {
+ goto shift_error;
+ }
+ $Dst = Op1;
+ $NEXT0();
+ }
+ big_words_needed = big_size(Op1);
+
+ big_shift:
+ if (shift_left_count > 0) { /* Left shift. */
+ big_words_needed += (shift_left_count / D_EXP);
+ } else { /* Right shift. */
+ if (big_words_needed <= (-shift_left_count / D_EXP)) {
+ big_words_needed = 3; /* ??? */
+ } else {
+ big_words_needed -= (-shift_left_count / D_EXP);
+ }
+ }
+ {
+ Eterm tmp_big[2];
+ Sint big_need_size = BIG_NEED_SIZE(big_words_needed+1);
+
+ /*
+ * Slightly conservative check of the size to avoid
+ * allocating huge amounts of memory for bignums that
+ * clearly would overflow the arity in the header
+ * word.
+ */
+ if (big_need_size-8 > BIG_ARITY_MAX) {
+ $SYSTEM_LIMIT($Fail);
+ }
+ $GC_TEST_PRESERVE(big_need_size+1, $Live, Op1);
+ if (is_small(Op1)) {
+ Op1 = small_to_big(signed_val(Op1), tmp_big);
+ }
+ Op1 = big_lshift(Op1, shift_left_count, HTOP);
+ if (is_big(Op1)) {
+ HTOP += bignum_header_arity(*HTOP) + 1;
+ }
+ HEAP_SPACE_VERIFIED(0);
+ if (ERTS_UNLIKELY(is_nil(Op1))) {
+ /*
+ * This result must have been only slightly larger
+ * than allowed since it wasn't caught by the
+ * previous test.
+ */
+ $SYSTEM_LIMIT($Fail);
+ }
+ ERTS_HOLE_CHECK(c_p);
+ $REFRESH_GEN_DEST();
+ $Dst = Op1;
+ $NEXT0();
+ }
+ }
+
+ /*
+ * One or more non-integer arguments.
+ */
+ shift_error:
+ c_p->freason = BADARITH;
+ if ($Fail) {
+ $FAIL($Fail);
+ } else {
+ reg[0] = Op1;
+ reg[1] = Op2;
+ SWAPOUT;
+ if (IsOpCode(I[0], i_bsl_ssjtd)) {
+ I = handle_error(c_p, I, reg, &bif_export[BIF_bsl_2]->info.mfa);
+ } else {
+ ASSERT(IsOpCode(I[0], i_bsr_ssjtd));
+ I = handle_error(c_p, I, reg, &bif_export[BIF_bsr_2]->info.mfa);
+ }
+ goto post_error_handling;
+ }
+}
+
+i_int_bnot(Fail, Src, Live, Dst) {
+ Eterm bnot_val = $Src;
+ if (ERTS_LIKELY(is_small(bnot_val))) {
+ bnot_val = make_small(~signed_val(bnot_val));
+ } else {
+ Uint live = $Live;
+ HEAVY_SWAPOUT;
+ reg[live] = bnot_val;
+ bnot_val = erts_gc_bnot(c_p, reg, live);
+ HEAVY_SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_UNLIKELY(is_nil(bnot_val))) {
+ $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, reg[live]);
+ }
+ $REFRESH_GEN_DEST();
+ }
+ $Dst = bnot_val;
+}
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index a5e778e4aa..59b51fd15e 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -34,20 +34,18 @@
IndexTable erts_atom_table; /* The index table */
-#include "erl_smp.h"
+static erts_rwmtx_t atom_table_lock;
-static erts_smp_rwmtx_t atom_table_lock;
-
-#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock)
-#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
-#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
-#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
+#define atom_read_lock() erts_rwmtx_rlock(&atom_table_lock)
+#define atom_read_unlock() erts_rwmtx_runlock(&atom_table_lock)
+#define atom_write_lock() erts_rwmtx_rwlock(&atom_table_lock)
+#define atom_write_unlock() erts_rwmtx_rwunlock(&atom_table_lock)
#if 0
#define ERTS_ATOM_PUT_OPS_STAT
#endif
#ifdef ERTS_ATOM_PUT_OPS_STAT
-static erts_smp_atomic_t atom_put_ops;
+static erts_atomic_t atom_put_ops;
#endif
/* Functions for allocating space for the ext of atoms. We do not
@@ -68,7 +66,7 @@ static Uint atom_space; /* Amount of atom text space used */
/*
* Print info about atom tables
*/
-void atom_info(int to, void *to_arg)
+void atom_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -76,7 +74,7 @@ void atom_info(int to, void *to_arg)
index_info(to, to_arg, &erts_atom_table);
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_print(to, to_arg, "atom_put_ops: %ld\n",
- erts_smp_atomic_read_nob(&atom_put_ops));
+ erts_atomic_read_nob(&atom_put_ops));
#endif
if (lock)
@@ -138,7 +136,7 @@ atom_hash(Atom* obj)
while(len--) {
v = *p++;
/* latin1 clutch for r16 */
- if ((v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
+ if (len && (v & 0xFE) == 0xC2 && (*p & 0xC0) == 0x80) {
v = (v << 6) | (*p & 0x3F);
p++; len--;
}
@@ -176,7 +174,7 @@ atom_alloc(Atom* tmpl)
/*
* Precompute ordinal value of first 3 bytes + 7 bits.
- * This is used by utils.c:erts_cmp_atoms().
+ * This is used by erl_utils.h:erts_cmp_atoms().
* We cannot use the full 32 bits of the first 4 bytes,
* since we use the sign of the difference between two
* ordinal values to represent their relative order.
@@ -199,7 +197,7 @@ atom_alloc(Atom* tmpl)
static void
atom_free(Atom* obj)
{
- erts_free(ERTS_ALC_T_ATOM, (void*) obj);
+ ASSERT(obj->slot.index == atom_val(am_ErtsSecretAtom));
}
static void latin1_to_utf8(byte* conv_buf, const byte** srcp, int* lenp)
@@ -233,10 +231,10 @@ need_convertion:
}
/*
- * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ * erts_atom_put_index() may fail. Returns negative indexes for errors.
*/
-Eterm
-erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+int
+erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
{
byte utf8_copy[MAX_ATOM_SZ_FROM_LATIN1];
const byte *text = name;
@@ -246,14 +244,14 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
int aix;
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_inc_nob(&atom_put_ops);
+ erts_atomic_inc_nob(&atom_put_ops);
#endif
if (tlen < 0) {
if (trunc)
tlen = 0;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
switch (enc) {
@@ -262,7 +260,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
#ifdef DEBUG
for (aix = 0; aix < len; aix++) {
@@ -276,7 +274,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
if (trunc)
tlen = MAX_ATOM_CHARACTERS;
else
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
}
no_latin1_chars = tlen;
latin1_to_utf8(utf8_copy, &text, &tlen);
@@ -284,7 +282,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_ATOM_ENC_UTF8:
/* First sanity check; need to verify later */
if (tlen > MAX_ATOM_SZ_LIMIT && !trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
break;
}
@@ -295,7 +293,7 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_read_unlock();
if (aix >= 0) {
/* Already in table no need to verify it */
- return make_atom(aix);
+ return aix;
}
if (enc == ERTS_ATOM_ENC_UTF8) {
@@ -314,13 +312,13 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
case ERTS_UTF8_OK_MAX_CHARS:
/* Truncated... */
if (!trunc)
- return THE_NON_VALUE;
+ return ATOM_MAX_CHARS_ERROR;
ASSERT(no_chars == MAX_ATOM_CHARACTERS);
tlen = err_pos - text;
break;
default:
/* Bad utf8... */
- return THE_NON_VALUE;
+ return ATOM_BAD_ENCODING_ERROR;
}
}
@@ -333,7 +331,20 @@ erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
atom_write_lock();
aix = index_put(&erts_atom_table, (void*) &a);
atom_write_unlock();
- return make_atom(aix);
+ return aix;
+}
+
+/*
+ * erts_atom_put() may fail. If it fails THE_NON_VALUE is returned!
+ */
+Eterm
+erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc)
+{
+ int aix = erts_atom_put_index(name, len, enc, trunc);
+ if (aix >= 0)
+ return make_atom(aix);
+ else
+ return THE_NON_VALUE;
}
Eterm
@@ -346,32 +357,24 @@ am_atom_put(const char* name, int len)
int atom_table_size(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = erts_atom_table.entries;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
int atom_table_sz(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = index_table_sz(&erts_atom_table);
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
@@ -399,19 +402,15 @@ erts_atom_get(const char *name, int len, Eterm* ap, ErtsAtomEncoding enc)
void
erts_atom_get_text_space_sizes(Uint *reserved, Uint *used)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
if (reserved)
*reserved = reserved_atom_space;
if (used)
*used = atom_space;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
}
void
@@ -420,16 +419,17 @@ init_atom_table(void)
HashFunctions f;
int i;
Atom a;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_init_nob(&atom_put_ops, 0);
+ erts_atomic_init_nob(&atom_put_ops, 0);
#endif
- erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+ erts_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
@@ -452,7 +452,7 @@ init_atom_table(void)
/* Ordinary atoms */
for (i = 0; erl_atom_names[i] != 0; i++) {
int ix;
- a.len = strlen(erl_atom_names[i]);
+ a.len = sys_strlen(erl_atom_names[i]);
a.latin1_chars = a.len;
a.name = (byte*)erl_atom_names[i];
a.slot.index = i;
@@ -467,10 +467,13 @@ init_atom_table(void)
atom_space -= a.len;
atom_tab(ix)->name = (byte*)erl_atom_names[i];
}
+
+ /* Hide am_ErtsSecretAtom */
+ hash_erase(&erts_atom_table.htable, atom_tab(atom_val(am_ErtsSecretAtom)));
}
void
-dump_atoms(int to, void *to_arg)
+dump_atoms(fmtfn_t to, void *to_arg)
{
int i = erts_atom_table.entries;
@@ -483,3 +486,9 @@ dump_atoms(int to, void *to_arg)
}
}
}
+
+Uint
+erts_get_atom_limit(void)
+{
+ return erts_atom_table.limit;
+}
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
index ae60904785..ca920679c6 100644
--- a/erts/emulator/beam/atom.h
+++ b/erts/emulator/beam/atom.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,12 +29,14 @@
#define MAX_ATOM_SZ_LIMIT (4*MAX_ATOM_CHARACTERS) /* theoretical byte limit */
#define ATOM_LIMIT (1024*1024)
#define MIN_ATOM_TABLE_SIZE 8192
+#define ATOM_BAD_ENCODING_ERROR -1
+#define ATOM_MAX_CHARS_ERROR -2
#ifndef ARCH_32
/* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an
unsigned 32 bit integer. See external.c(erts_encode_ext_dist_header_setup)
for more details. */
-#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (UWORD_CONSTANT(1) << 32)) ? MAX_ATOM_INDEX + 1 : (UWORD_CONSTANT(1) << 32))
+#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (UWORD_CONSTANT(1) << 32)) ? MAX_ATOM_INDEX + 1 : ((UWORD_CONSTANT(1) << 31) - 1)) /* Here we use the maximum signed integer value to avoid integer overflow */
#else
#define MAX_ATOM_TABLE_SIZE (MAX_ATOM_INDEX + 1)
#endif
@@ -133,11 +135,11 @@ int atom_table_sz(void); /* table size in bytes, excluding stored objects */
Eterm am_atom_put(const char*, int); /* ONLY 7-bit ascii! */
Eterm erts_atom_put(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
-int atom_erase(byte*, int);
-int atom_static_put(byte*, int);
+int erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc);
void init_atom_table(void);
-void atom_info(int, void *);
-void dump_atoms(int, void *);
+void atom_info(fmtfn_t, void *);
+void dump_atoms(fmtfn_t, void *);
+Uint erts_get_atom_limit(void);
int erts_atom_get(const char* name, int len, Eterm* ap, ErtsAtomEncoding enc);
void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used);
#endif
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 2b66bf6f4e..a14f22b19e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -48,7 +48,7 @@ atom Empty=''
#
# Used in the Beam emulator loop. (Smaller literals usually means tighter code.)
#
-atom fun infinity timeout normal call return
+atom infinity timeout normal call return
atom throw error exit
atom undefined
@@ -59,6 +59,9 @@ atom nocatch
atom undefined_function
atom undefined_lambda
+# Secret internal atom that can never be found by string lookup
+# and should never leak out to be seen by the user.
+atom ErtsSecretAtom='3RT$'
# All other atoms. Try to keep the order alphabetic.
#
@@ -66,13 +69,13 @@ atom DOWN='DOWN'
atom UP='UP'
atom EXIT='EXIT'
atom abort
-atom aborted
atom abs_path
atom absoluteURI
atom ac
-atom accessor
atom active
atom active_tasks
+atom active_tasks_all
+atom alive
atom all
atom all_but_first
atom all_names
@@ -83,7 +86,6 @@ atom allocated_areas
atom allocator
atom allocator_sizes
atom alloc_util_allocators
-atom allow_gc
atom allow_passive_connect
atom already_loaded
atom amd64
@@ -103,6 +105,8 @@ atom asynchronous
atom atom
atom atom_used
atom attributes
+atom auto_connect
+atom await_exit
atom await_microstate_accounting_modifications
atom await_port_send_result
atom await_proc_exit
@@ -116,9 +120,7 @@ atom bag
atom band
atom big
atom bif_return_trap
-atom bif_timer_server
atom binary
-atom binary_bin_to_list_trap
atom binary_copy_trap
atom binary_find_trap
atom binary_longest_prefix_trap
@@ -140,6 +142,7 @@ atom bsr
atom bsr_anycrlf
atom bsr_unicode
atom build_type
+atom busy
atom busy_dist_port
atom busy_port
atom call
@@ -173,13 +176,13 @@ atom convert_time_unit
atom connect
atom connected
atom connection_closed
-atom cons
atom const
atom context_switches
atom control
atom copy
atom copy_literals
atom counters
+atom count
atom cpu
atom cpu_timestamp
atom cr
@@ -191,35 +194,36 @@ atom current_stacktrace
atom data
atom debug_flags
atom decimals
+atom default
atom delay_trap
-atom dexit
-atom depth
-atom dgroup_leader
atom dictionary
+atom dirty_bif_exception
+atom dirty_bif_result
+atom dirty_bif_trap
atom dirty_cpu
atom dirty_cpu_schedulers_online
atom dirty_execution
atom dirty_io
+atom dirty_nif_exception
+atom dirty_nif_finalizer
atom disable_trace
atom disabled
atom discard
atom display_items
atom dist
atom dist_cmd
+atom dist_ctrl_put_data
+atom dist_data
atom Div='/'
atom div
-atom dlink
atom dmonitor_node
-atom dmonitor_p
atom DollarDollar='$$'
atom DollarUnderscore='$_'
atom dollar_endonly
atom dotall
atom driver
atom driver_options
-atom dsend
atom dsend_continue_trap
-atom dunlink
atom duplicate_bag
atom duplicated
atom dupnames
@@ -236,19 +240,21 @@ atom Eq='=:='
atom Eqeq='=='
atom erl_tracer
atom erlang
-atom ERROR='ERROR'
+atom erl_signal_server
atom error_handler
atom error_logger
atom erts_code_purger
+atom erts_debug
+atom erts_dflags
atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
-atom event
atom exact_reductions
atom exception_from
atom exception_trace
atom exclusive
atom exit_status
+atom exited
atom existing
atom existing_processes
atom existing_ports
@@ -270,27 +276,21 @@ atom force
atom format_cpu_topology
atom free
atom fullsweep_after
-atom fullsweep_if_old_binaries
-atom fun
-atom function
atom functions
atom function_clause
atom garbage_collecting
atom garbage_collection
atom garbage_collection_info
-atom gc_end
atom gc_major_end
atom gc_major_start
atom gc_max_heap_size
atom gc_minor_end
atom gc_minor_start
-atom gc_start
atom Ge='>='
atom generational
-atom get_data
+atom get_all_trap
atom get_seq_token
atom get_tcw
-atom getenv
atom gather_gc_info_result
atom gather_io_bytes
atom gather_microstate_accounting_result
@@ -302,6 +302,7 @@ atom global
atom Gt='>'
atom grun
atom group_leader
+atom handle
atom have_dt_utag
atom heap_block_size
atom heap_size
@@ -326,16 +327,17 @@ atom index
atom infinity
atom info
atom info_msg
+atom info_trap
atom init
atom initial_call
atom input
atom internal
atom internal_error
-atom internal_status
atom instruction_counts
atom invalid
atom is_constant
atom is_seq_trace
+atom iterator
atom io
atom keypos
atom kill
@@ -361,11 +363,14 @@ atom loaded
atom load_cancelled
atom load_failure
atom local
+atom logger
atom long_gc
atom long_schedule
atom low
atom Lt='<'
atom machine
+atom magic_ref
+atom major
atom match
atom match_limit
atom match_limit_recursion
@@ -374,14 +379,12 @@ atom match_spec_result
atom max
atom maximum
atom max_heap_size
-atom max_tables max_processes
atom mbuf_size
atom md5
atom memory
atom memory_internal
atom memory_types
atom message
-atom message_binary
atom message_queue_data
atom message_queue_len
atom messages
@@ -393,8 +396,10 @@ atom microsecond
atom microstate_accounting
atom milli_seconds
atom millisecond
+atom min
atom min_heap_size
atom min_bin_vheap_size
+atom minor
atom minor_version
atom Minus='-'
atom module
@@ -427,21 +432,18 @@ atom new_processes
atom new_ports
atom new_uniq
atom newline
-atom next
atom no
atom nomatch
atom none
atom no_auto_capture
atom noconnect
atom noconnection
-atom nocookie
atom node
atom node_type
atom nodedown
atom nodedown_reason
atom nodeup
atom noeol
-atom nofile
atom noproc
atom normal
atom nosuspend
@@ -449,6 +451,7 @@ atom no_float
atom no_integer
atom no_network
atom no_start_optimize
+atom not_suspended
atom not
atom not_a_list
atom not_loaded
@@ -463,7 +466,6 @@ atom notempty_atstart
atom notify
atom notsup
atom nouse_stdio
-atom objects
atom off_heap
atom offset
atom ok
@@ -533,7 +535,6 @@ atom re_run_trap
atom read_concurrency
atom ready_input
atom ready_output
-atom ready_async
atom reason
atom receive
atom recent_size
@@ -551,6 +552,7 @@ atom return_to
atom return_trace
atom run_queue
atom run_queue_lengths
+atom run_queue_lengths_all
atom runnable
atom runnable_ports
atom runnable_procs
@@ -560,14 +562,18 @@ atom running_procs
atom runtime
atom safe
atom save_calls
+atom sbct
atom scheduler
atom scheduler_id
+atom scheduler_wall_time
+atom scheduler_wall_time_all
atom schedulers_online
atom scheme
atom scientific
atom scope
atom second
atom seconds
+atom send
atom send_to_non_existing_process
atom sensitive
atom sequential_tracer
@@ -575,7 +581,6 @@ atom sequential_trace_token
atom serial
atom set
atom set_cpu_topology
-atom set_data
atom set_on_first_link
atom set_on_first_spawn
atom set_on_link
@@ -583,11 +588,21 @@ atom set_on_spawn
atom set_seq_token
atom set_tcw
atom set_tcw_fake
-atom separate
-atom shared
+atom sighup
+atom sigterm
+atom sigusr1
+atom sigusr2
+atom sigill
+atom sigchld
+atom sigabrt
+atom sigalrm
+atom sigstop
+atom sigint
+atom sigsegv
+atom sigtstp
+atom sigquit
atom silent
atom size
-atom sl_alloc
atom spawn_executable
atom spawn_driver
atom spawned
@@ -595,7 +610,6 @@ atom ssl_tls
atom stack_size
atom start
atom status
-atom static
atom stderr_to_stdout
atom stop
atom stream
@@ -605,13 +619,11 @@ atom sunrm
atom suspend
atom suspended
atom suspending
-atom sys_misc
atom system
-atom system_error
+atom system_flag_scheduler_wall_time
atom system_limit
atom system_version
atom system_architecture
-atom SYSTEM='SYSTEM'
atom table
atom term_to_binary_trap
atom this
@@ -624,10 +636,12 @@ atom Times='*'
atom timestamp
atom total
atom total_active_tasks
+atom total_active_tasks_all
atom total_heap_size
atom total_run_queue_lengths
+atom total_run_queue_lengths_all
atom tpkt
-atom trace trace_ts traced
+atom trace traced
atom trace_control_word
atom trace_status
atom tracer
@@ -636,7 +650,6 @@ atom trim
atom trim_all
atom try_clause
atom true
-atom tuple
atom type
atom ucompile
atom ucp
@@ -653,21 +666,19 @@ atom unblock_normal
atom uniq
atom unless_suspending
atom unloaded
-atom unloading
atom unloaded_only
atom unload_cancelled
atom value
-atom values
atom version
atom visible
atom waiting
atom wall_clock
atom warning
atom warning_msg
-atom scheduler_wall_time
atom wordsize
atom write_concurrency
atom xor
atom x86
atom yes
atom yield
+atom nifs
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 5969197168..bb1b2e5b27 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,6 +36,14 @@
#include "erl_nif.h"
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
+#include "erl_proc_sig_queue.h"
+#ifdef HIPE
+# include "hipe_bif0.h"
+# define IF_HIPE(X) (X)
+#else
+# define IF_HIPE(X) (0)
+#endif
#ifdef HIPE
# include "hipe_stack.h"
@@ -43,7 +51,7 @@
static struct {
Eterm module;
- erts_smp_mtx_t mtx;
+ erts_mtx_t mtx;
Export *pending_purge_lambda;
Eterm *sprocs;
Eterm def_sprocs[10];
@@ -58,14 +66,10 @@ static struct {
Process *erts_code_purger = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
-Process *erts_dirty_process_code_checker;
-#endif
-erts_smp_atomic_t erts_copy_literal_area__;
+erts_atomic_t erts_copy_literal_area__;
#define ERTS_SET_COPY_LITERAL_AREA(LA) \
- erts_smp_atomic_set_nob(&erts_copy_literal_area__, \
+ erts_atomic_set_nob(&erts_copy_literal_area__, \
(erts_aint_t) (LA))
-#ifdef ERTS_NEW_PURGE_STRATEGY
Process *erts_literal_area_collector = NULL;
typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef;
@@ -75,14 +79,13 @@ struct ErtsLiteralAreaRef_ {
};
struct {
- erts_smp_mtx_t mtx;
+ erts_mtx_t mtx;
ErtsLiteralAreaRef *first;
ErtsLiteralAreaRef *last;
} release_literal_areas;
-#endif
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
+static Eterm check_process_code(Process* rp, Module* modp, int *redsp, int fcalls);
static void delete_code(Module* modp);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -92,7 +95,8 @@ init_purge_state(void)
{
purge_state.module = THE_NON_VALUE;
- erts_smp_mtx_init(&purge_state.mtx, "purge_state");
+ erts_mtx_init(&purge_state.mtx, "purge_state", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
purge_state.pending_purge_lambda =
erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
@@ -113,12 +117,12 @@ init_purge_state(void)
void
erts_beam_bif_load_init(void)
{
-#ifdef ERTS_NEW_PURGE_STRATEGY
- erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
+ erts_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
release_literal_areas.first = NULL;
release_literal_areas.last = NULL;
-#endif
- erts_smp_atomic_init_nob(&erts_copy_literal_area__,
+ erts_atomic_init_nob(&erts_copy_literal_area__,
(erts_aint_t) NULL);
init_purge_state();
@@ -147,18 +151,29 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
{
+#if !defined(HIPE)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
Module* modp;
- Eterm res;
+ Eterm res, mod;
+
+ if (!is_internal_magic_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ mod = erts_module_for_prepared_code(erts_magic_ref2bin(BIF_ARG_1));
+
+ if (is_not_atom(mod))
+ BIF_ERROR(BIF_P, BADARG);
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
- modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp && modp->curr.num_breakpoints > 0) {
ASSERT(modp->curr.code_hdr != NULL);
@@ -170,17 +185,21 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- if (res == BIF_ARG_1) {
+ if (res == mod) {
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
+ if (!modp)
+ modp = erts_get_module(mod, erts_active_code_ix());
+ hipe_redirect_to_module(modp);
}
else {
erts_abort_staging_code_ix();
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
+#endif
}
BIF_RETTYPE
@@ -213,9 +232,9 @@ prepare_loading_2(BIF_ALIST_2)
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
- erts_refc_dec(&magic->refc, 1);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->intern.refc, 1);
BIF_RET(res);
}
@@ -223,15 +242,13 @@ BIF_RETTYPE
has_prepared_code_on_load_1(BIF_ALIST_1)
{
Eterm res;
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1)) {
+ if (!is_internal_magic_ref(BIF_ARG_1)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- pb = (ProcBin*) binary_val(BIF_ARG_1);
- res = erts_has_code_on_load(pb->val);
+ res = erts_has_code_on_load(erts_magic_ref2bin(BIF_ARG_1));
if (res == NIL) {
goto error;
}
@@ -242,11 +259,10 @@ struct m {
Binary* code;
Eterm module;
Module* modp;
- Uint exception;
+ Eterm exception;
};
-static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
-#ifdef ERTS_SMP
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
static void smp_code_ix_commiter(void*);
static struct /* Protected by code_write_permission */
@@ -254,7 +270,6 @@ static struct /* Protected by code_write_permission */
Process* stager;
ErtsThrPrgrLaterOp lop;
} committer_state;
-#endif
static Eterm
exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
@@ -263,7 +278,7 @@ exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
Eterm res = NIL;
while (exceptions > 0) {
- if (mp->exception) {
+ if (is_value(mp->exception)) {
res = CONS(hp, mp->module, res);
hp += 2;
exceptions--;
@@ -317,13 +332,11 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
Eterm* cons = list_val(BIF_ARG_1);
Eterm term = CAR(cons);
- ProcBin* pb;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+ if (!is_internal_magic_ref(term)) {
goto badarg;
}
- pb = (ProcBin*) binary_val(term);
- p[i].code = pb->val;
+ p[i].code = erts_magic_ref2bin(term);
p[i].module = erts_module_for_prepared_code(p[i].code);
if (p[i].module == NIL) {
goto badarg;
@@ -366,9 +379,9 @@ finish_loading_1(BIF_ALIST_1)
exceptions = 0;
for (i = 0; i < n; i++) {
- p[i].exception = 0;
+ p[i].exception = THE_NON_VALUE;
if (p[i].modp->seen) {
- p[i].exception = 1;
+ p[i].exception = am_duplicated;
exceptions++;
}
p[i].modp->seen = 1;
@@ -381,10 +394,11 @@ finish_loading_1(BIF_ALIST_1)
for (i = 0; i < n; i++) {
if (p[i].modp->curr.num_breakpoints > 0 ||
p[i].modp->curr.num_traced_exports > 0 ||
- erts_is_default_trace_enabled()) {
- /* tracing involved, fallback with thread blocking */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_is_default_trace_enabled() ||
+ IF_HIPE(hipe_need_blocking(p[i].modp))) {
+ /* tracing or hipe need thread blocking */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
is_blocking = 1;
break;
}
@@ -401,9 +415,9 @@ finish_loading_1(BIF_ALIST_1)
exceptions = 0;
for (i = 0; i < n; i++) {
- p[i].exception = 0;
+ p[i].exception = THE_NON_VALUE;
if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
- p[i].exception = 1;
+ p[i].exception = am_not_purged;
exceptions++;
}
}
@@ -420,11 +434,11 @@ finish_loading_1(BIF_ALIST_1)
Eterm mod;
Eterm retval;
- erts_refc_inc(&p[i].code->refc, 1);
+ erts_refc_inc(&p[i].code->intern.refc, 1);
retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
ASSERT(retval == NIL || retval == am_on_load);
if (retval == am_on_load) {
- p[i].exception = 1;
+ p[i].exception = am_on_load;
exceptions++;
}
}
@@ -440,46 +454,48 @@ finish_loading_1(BIF_ALIST_1)
}
done:
- return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n, 1);
}
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
- struct m* loaded, int nloaded)
+ struct m* mods, int nmods, int free_mods)
{
-#ifdef ERTS_SMP
if (is_blocking || !commit)
-#endif
{
if (commit) {
+ int i;
erts_end_staging_code_ix();
erts_commit_staging_code_ix();
- if (loaded) {
- int i;
- for (i=0; i < nloaded; i++) {
- set_default_trace_pattern(loaded[i].module);
+
+ for (i=0; i < nmods; i++) {
+ if (mods[i].modp->curr.code_hdr
+ && mods[i].exception != am_on_load) {
+ set_default_trace_pattern(mods[i].module);
}
+ #ifdef HIPE
+ hipe_redirect_to_module(mods[i].modp);
+ #endif
}
}
else {
erts_abort_staging_code_ix();
}
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
if (is_blocking) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
erts_release_code_write_permission();
return res;
}
-#ifdef ERTS_SMP
else {
ASSERT(is_value(res));
- if (loaded) {
- erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ if (free_mods) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, mods);
}
erts_end_staging_code_ix();
/*
@@ -499,11 +515,9 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
*/
ERTS_BIF_YIELD_RETURN(c_p, res);
}
-#endif
}
-#ifdef ERTS_SMP
static void smp_code_ix_commiter(void* null)
{
Process* p = committer_state.stager;
@@ -513,14 +527,13 @@ static void smp_code_ix_commiter(void* null)
committer_state.stager = NULL;
#endif
erts_release_code_write_permission();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
erts_proc_dec_refc(p);
}
-#endif /* ERTS_SMP */
@@ -547,7 +560,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls)
+erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -562,31 +575,23 @@ erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int
if (!modp)
return am_false;
erts_rlock_old_code(code_ix);
- res = (!modp->old.code_hdr ? am_false :
- check_process_code(c_p, modp, flags, redsp, fcalls));
+ res = (!modp->old.code_hdr
+ ? am_false
+ : check_process_code(c_p, modp, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
}
-BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_check_process_code_1(BIF_ALIST_1)
{
int reds = 0;
- Uint flags;
Eterm res;
if (is_not_atom(BIF_ARG_1))
goto badarg;
- if (is_not_small(BIF_ARG_2))
- goto badarg;
-
- flags = unsigned_val(BIF_ARG_2);
- if (flags & ~ERTS_CPC_ALL) {
- goto badarg;
- }
-
- res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -598,14 +603,14 @@ badarg:
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
-#if !defined(ERTS_DIRTY_SCHEDULERS)
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#else
+ erts_aint32_t state;
Process *rp;
- int reds = 0;
+ int dirty, busy, reds = 0;
Eterm res;
- if (BIF_P != erts_dirty_process_code_checker)
+ if (BIF_P != erts_dirty_process_signal_handler
+ && BIF_P != erts_dirty_process_signal_handler_high
+ && BIF_P != erts_dirty_process_signal_handler_max)
BIF_ERROR(BIF_P, EXC_NOTSUP);
if (is_not_internal_pid(BIF_ARG_1))
@@ -614,23 +619,31 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
if (is_not_atom(BIF_ARG_2))
BIF_ERROR(BIF_P, BADARG);
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (BIF_ARG_1 == BIF_P->common.id)
+ BIF_RET(am_normal);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
if (!rp)
- BIF_RET(am_false);
-
- res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls);
+ BIF_RET(am_false);
- if (BIF_P != rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ state = erts_atomic32_read_nob(&rp->state);
+ dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (!dirty)
+ BIF_RET(am_normal);
- ASSERT(is_value(res));
+ busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
+
+ if (busy)
+ BIF_RET(am_busy);
+
+ res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
+
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(res == am_true || res == am_false);
BIF_RET2(res, reds);
-#endif
}
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
@@ -658,17 +671,18 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
}
else {
if (modp->curr.num_breakpoints > 0 ||
- modp->curr.num_traced_exports > 0) {
- /* we have tracing, retry single threaded */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ modp->curr.num_traced_exports > 0 ||
+ IF_HIPE(hipe_need_blocking(modp))) {
+ /* tracing or hipe need to go single threaded */
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
is_blocking = 1;
if (modp->curr.num_breakpoints) {
erts_clear_module_break(modp);
@@ -680,7 +694,15 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
success = 1;
}
}
- return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
+ {
+ struct m mod;
+ Eterm retval;
+ mod.module = BIF_ARG_1;
+ mod.modp = modp;
+ mod.exception = THE_NON_VALUE;
+ retval = staging_epilogue(BIF_P, success, res, is_blocking, &mod, 1, 0);
+ return retval;
+ }
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
@@ -758,35 +780,44 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
ErtsCodeIndex code_ix;
Module* modp;
+ if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- /* ToDo: Use code_ix staging instead of thread blocking */
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
code_ix = erts_active_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
- if (!modp || !modp->on_load || !modp->on_load->code_hdr) {
- error:
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (!modp || !modp->on_load || !modp->on_load->code_hdr
+ || !modp->on_load->code_hdr->on_load_function_ptr) {
+
erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if (modp->on_load->code_hdr->on_load_function_ptr == NULL) {
- goto error;
- }
- if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
- goto error;
- }
if (BIF_ARG_2 == am_true) {
- int i;
+ struct m mods[1];
+ int is_blocking = 0;
+ int i, num_exps;
+
+ erts_start_staging_code_ix(0);
+ code_ix = erts_staging_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+
+ ASSERT(modp && modp->on_load && modp->on_load->code_hdr
+ && modp->on_load->code_hdr->on_load_function_ptr);
+
+ if (erts_is_default_trace_enabled()
+ || IF_HIPE(hipe_need_blocking(modp))) {
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ is_blocking = 1;
+ }
/*
* Make the code with the on_load function current.
@@ -802,46 +833,51 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
/*
* The on_load function succeeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[4] != 0) {
- ep->addressv[code_ix] = (void *) ep->code[4];
- ep->code[4] = 0;
+ if (ep->beam[1] != 0) {
+ ep->addressv[code_ix] = (void *) ep->beam[1];
+ ep->beam[1] = 0;
} else {
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_apply_bif)) {
continue;
}
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = BeamOpCodeAddr(op_call_error_handler);
}
}
modp->curr.code_hdr->on_load_function_ptr = NULL;
- set_default_trace_pattern(BIF_ARG_1);
- } else if (BIF_ARG_2 == am_false) {
- int i;
+
+ mods[0].modp = modp;
+ mods[0].module = BIF_ARG_1;
+ mods[0].exception = THE_NON_VALUE;
+ return staging_epilogue(BIF_P, 1, am_true, is_blocking, mods, 1, 0);
+ }
+ else if (BIF_ARG_2 == am_false) {
+ int i, num_exps;
/*
* The on_load function failed. Remove references to the
* code that is about to be purged from the export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) {
continue;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
continue;
}
- ep->code[4] = 0;
+ ep->beam[1] = 0;
}
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
BIF_RET(am_true);
}
@@ -861,9 +897,9 @@ set_default_trace_pattern(Eterm module)
&trace_pattern_flags,
&meta_tracer);
if (trace_pattern_is_on) {
- Eterm mfa[1];
- mfa[0] = module;
- (void) erts_set_trace_pattern(0, mfa, 1,
+ ErtsCodeMFA mfa;
+ mfa.module = module;
+ (void) erts_set_trace_pattern(0, &mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
@@ -871,45 +907,69 @@ set_default_trace_pattern(Eterm module)
}
}
-#ifndef ERTS_NEW_PURGE_STRATEGY
-
-static ERTS_INLINE int
-check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
-{
- struct erl_off_heap_header* oh;
- for (oh = off_heap->first; oh; oh = oh->next) {
- if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
- ErlFunThing* funp = (ErlFunThing*) oh;
- if (ErtsInArea(funp->fe->address, area, area_size))
- return !0;
- }
- }
- return 0;
-}
-
-#endif
-
static Uint hfrag_literal_size(Eterm* start, Eterm* end,
char* lit_start, Uint lit_size);
static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
Eterm *start, Eterm *end,
char *lit_start, Uint lit_size);
-#ifdef ERTS_NEW_PURGE_STRATEGY
+static ERTS_INLINE void
+msg_copy_literal_area(ErtsMessage *msgp, int *redsp,
+ char *literals, Uint lit_bsize)
+{
+ ErlHeapFragment *hfrag, *hf;
+ Uint lit_sz = 0;
+
+ *redsp += 1;
+
+ if (!ERTS_SIG_IS_INTERNAL_MSG(msgp) || !msgp->data.attached)
+ return;
+
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ hfrag = &msgp->hfrag;
+ else
+ hfrag = msgp->data.heap_frag;
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ lit_sz += hfrag_literal_size(&hf->mem[0],
+ &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ *redsp += 1;
+ }
+
+ *redsp += lit_sz / 16; /* Better value needed... */
+ if (lit_sz > 0) {
+ ErlHeapFragment *bp = new_message_buffer(lit_sz);
+ Eterm *hp = bp->mem;
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ hfrag_literal_copy(&hp, &bp->off_heap,
+ &hf->mem[0],
+ &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ hfrag = hf;
+ }
+
+ /* link new hfrag last */
+ ASSERT(hfrag->next == NULL);
+ hfrag->next = bp;
+ bp->next = NULL;
+ }
+}
Eterm
erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed)
{
ErtsLiteralArea *la;
- ErtsMessage *msgp;
struct erl_off_heap_header* oh;
char *literals;
Uint lit_bsize;
ErlHeapFragment *hfrag;
+ ErtsMessage *mfp;
la = ERTS_COPY_LITERAL_AREA();
if (!la)
- return am_ok;
+ goto return_ok;
oh = la->off_heap;
literals = (char *) &la->start[0];
@@ -922,47 +982,14 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
 * any other heap than the message itself.
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
- for (msgp = c_p->msg.first; msgp; msgp = msgp->next) {
- ErlHeapFragment *hf;
- Uint lit_sz = 0;
-
- *redsp += 1;
-
- if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- hfrag = &msgp->hfrag;
- else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
- hfrag = msgp->data.heap_frag;
- else
- continue; /* Content on heap or in external term format... */
-
- for (hf = hfrag; hf; hf = hf->next) {
- lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- *redsp += 1;
- }
-
- *redsp += lit_sz / 16; /* Better value needed... */
- if (lit_sz > 0) {
- ErlHeapFragment *bp = new_message_buffer(lit_sz);
- Eterm *hp = bp->mem;
-
- for (hf = hfrag; hf; hf = hf->next) {
- hfrag_literal_copy(&hp, &bp->off_heap,
- &hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- hfrag = hf;
- }
-
- /* link new hfrag last */
- ASSERT(hfrag->next == NULL);
- hfrag->next = bp;
- bp->next = NULL;
- }
- }
+ ERTS_FOREACH_SIG_PRIVQS(c_p, msgp, msg_copy_literal_area(msgp,
+ redsp,
+ literals,
+ lit_bsize));
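
A hedged reading of the new traversal: assuming ERTS_FOREACH_SIG_PRIVQS(c_p, msgp, Expr) expands to a loop that binds msgp to each entry of the process's private signal queues and evaluates Expr for it, the hand-written message loop it replaces becomes roughly:

    /* Conceptual expansion only; the field name is assumed and the real
     * macro also walks the continuation part of the private queue. */
    ErtsMessage *msgp;
    for (msgp = c_p->sig_qs.first; msgp; msgp = msgp->next)
        msg_copy_literal_area(msgp, redsp, literals, lit_bsize);

msg_copy_literal_area() itself skips non-message signals via ERTS_SIG_IS_INTERNAL_MSG(), so the filtering the old loop did inline now happens inside the helper.
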
if (gc_allowed) {
/*
@@ -973,6 +1000,11 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
* this is not completely certain). We go for
* the GC directly instead of scanning everything
* one more time...
+ *
+ * Also note that calling functions expect a
+ * major GC to be performed if gc_allowed is set
+ * to true. If you change this, you need to fix
+ * callers...
*/
goto literal_gc;
}
@@ -995,6 +1027,12 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
goto literal_gc;
*redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
goto literal_gc;
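
All of these scans come down to the same pointer-in-range test. A minimal sketch of that test, under the assumption that ErtsInArea is the usual unsigned-compare range check (the hypothetical IN_AREA name is used here to avoid restating the real macro):

    /* Does ptr point into [start, start + nbytes)? A single unsigned
     * compare covers both bounds. */
    #define IN_AREA(ptr, start, nbytes) \
        (((UWord)((char *)(ptr) - (char *)(start))) < (UWord)(nbytes))
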
@@ -1026,8 +1064,8 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
* process off heap structure.
* - Check for literals
*/
- for (msgp = c_p->msg_frag; msgp; msgp = msgp->next) {
- hfrag = erts_message_to_heap_frag(msgp);
+ for (mfp = c_p->msg_frag; mfp; mfp = mfp->next) {
+ hfrag = erts_message_to_heap_frag(mfp);
for (; hfrag; hfrag = hfrag->next) {
Eterm *hp, *hp_end;
@@ -1041,34 +1079,41 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
}
}
+return_ok:
+
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)))
+ c_p->flags &= ~F_DIRTY_CLA;
+
return am_ok;
literal_gc:
if (!gc_allowed)
- return am_need_gc;
+ return am_need_gc;
if (c_p->flags & F_DISABLE_GC)
- return THE_NON_VALUE;
-
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ return THE_NON_VALUE;
- *redsp += erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
+ *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize,
+ oh, fcalls);
- erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh);
-
- *redsp += lit_bsize / 64; /* Need, better value... */
+ if (c_p->flags & F_DIRTY_CLA)
+ return THE_NON_VALUE;
return am_ok;
}
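
For orientation, a sketch of how a caller is expected to react to the three possible results; this is inferred from the return points above, not lifted from an actual call site:

    int reds = 0;
    Eterm res = erts_proc_copy_literal_area(c_p, &reds, fcalls, gc_allowed);
    if (res == am_ok) {
        /* done: no references into the area remain, or they were copied */
    } else if (res == am_need_gc) {
        /* only possible when gc_allowed is false: retry with GC allowed */
    } else {
        /* THE_NON_VALUE: GC is disabled, or the job moved to a dirty
         * scheduler (F_DIRTY_CLA); try again later */
    }

Note the comment added in the gc_allowed branch above: callers may rely on a major GC having been performed whenever they pass gc_allowed as true.
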
static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
+check_process_code(Process* rp, Module* modp, int *redsp, int fcalls)
{
BeamInstr* start;
char* mod_start;
Uint mod_size;
Eterm* sp;
+#ifdef HIPE
+ void *nat_start = NULL;
+ Uint nat_size = 0;
+#endif
*redsp += 1;
@@ -1086,6 +1131,11 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
|| ErtsInArea(rp->cp, mod_start, mod_size)) {
return am_true;
}
+
+ *redsp += 1;
+
+ if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
+ return am_true;
*redsp += (STACK_START(rp) - rp->stop) / 32;
@@ -1098,78 +1148,19 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- /*
- * Check all continuation pointers stored in stackdump
- * and clear exception stackdump if there is a pointer
- * to the module.
- */
- if (rp->ftrace != NIL) {
- struct StackTrace *s;
- ASSERT(is_list(rp->ftrace));
- s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
- if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
- (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- } else {
- int i;
- for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- break;
- }
- }
- }
- }
-
- return am_false;
-}
-
-#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... */
-
-static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
-{
- BeamInstr* start;
- char* literals;
- Uint lit_bsize;
- char* mod_start;
- Uint mod_size;
- Eterm* sp;
- int done_gc = 0;
- int need_gc = 0;
- ErtsMessage *msgp;
- ErlHeapFragment *hfrag;
-
-#define ERTS_ORDINARY_GC__ (1 << 0)
-#define ERTS_LITERAL_GC__ (1 << 1)
-
- /*
- * Pick up limits for the module.
- */
- start = (BeamInstr*) modp->old.code_hdr;
- mod_start = (char *) start;
- mod_size = modp->old.code_length;
-
- /*
- * Check if current instruction or continuation pointer points into module.
- */
- if (ErtsInArea(rp->i, mod_start, mod_size)
- || ErtsInArea(rp->cp, mod_start, mod_size)) {
- return am_true;
- }
-
+#ifdef HIPE
/*
- * Check all continuation pointers stored on the stack.
+ * Check all continuation pointers stored on the native stack if the module
+ * has native code.
*/
- for (sp = rp->stop; sp < STACK_START(rp); sp++) {
- if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
+ if (modp->old.hipe_code) {
+ nat_start = modp->old.hipe_code->text_segment;
+ nat_size = modp->old.hipe_code->text_segment_size;
+ if (nat_size && nstack_any_cps_in_segment(rp, nat_start, nat_size)) {
return am_true;
}
}
+#endif
/*
* Check all continuation pointers stored in stackdump
@@ -1187,8 +1178,16 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
rp->ftrace = NIL;
} else {
int i;
+ char *area_start = mod_start;
+ Uint area_size = mod_size;
+#ifdef HIPE
+ if (rp->freason & EXF_NATIVE) {
+ area_start = nat_start;
+ area_size = nat_size;
+ }
+#endif
for (i = 0; i < s->depth; i++) {
- if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
+ if (ErtsInArea(s->trace[i], area_start, area_size)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
@@ -1198,183 +1197,9 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
}
- if (rp->flags & F_DISABLE_GC) {
- /*
- * Cannot proceed. Process has disabled gc in order to
- * safely leave inconsistent data on the heap and/or
- * off heap lists. Need to wait for gc to be enabled
- * again.
- */
- return THE_NON_VALUE;
- }
-
- /*
- * Message queue can contains funs, and may contain
- * literals. If we got references to this module from the message
- * queue.
- *
- * If a literal is in the message queue we maka an explicit copy of
- * and attach it to the heap fragment. Each message needs to be
- * self contained, we cannot save the literal in the old_heap or
- * any other heap than the message it self.
- */
-
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
-
- if (modp->old.code_hdr->literal_area) {
- literals = (char*) modp->old.code_hdr->literal_area->start;
- lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals;
- }
- else {
- literals = NULL;
- lit_bsize = 0;
- }
-
- for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- hfrag = &msgp->hfrag;
- else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
- hfrag = msgp->data.heap_frag;
- else
- continue;
- {
- ErlHeapFragment *hf;
- Uint lit_sz;
- for (hf=hfrag; hf; hf = hf->next) {
- if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
- return am_true;
- lit_sz = hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- }
- if (lit_sz > 0) {
- ErlHeapFragment *bp = new_message_buffer(lit_sz);
- Eterm *hp = bp->mem;
-
- for (hf=hfrag; hf; hf = hf->next) {
- hfrag_literal_copy(&hp, &bp->off_heap,
- &hf->mem[0], &hf->mem[hf->used_size],
- literals, lit_bsize);
- hfrag=hf;
- }
- /* link new hfrag last */
- ASSERT(hfrag->next == NULL);
- hfrag->next = bp;
- bp->next = NULL;
- }
- }
- }
-
- while (1) {
-
- /* Check heap, stack etc... */
- if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size))
- goto try_gc;
- if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) {
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- }
- if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
- goto try_literal_gc;
-#ifdef HIPE
- if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
- goto try_literal_gc;
-#endif
- if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
- goto try_literal_gc;
- if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
- goto try_literal_gc;
-
- /* Check dictionary */
- if (rp->dictionary) {
- Eterm* start = ERTS_PD_START(rp->dictionary);
- Eterm* end = start + ERTS_PD_SIZE(rp->dictionary);
-
- if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /* Check heap fragments */
- for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- /* Off heap lists should already have been moved into process */
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
-
- /*
- * Message buffer fragments (matched messages)
- * - off heap lists should already have been moved into
- * process off heap structure.
- * - Check for literals
- */
- for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
- hfrag = erts_message_to_heap_frag(msgp);
- for (; hfrag; hfrag = hfrag->next) {
- Eterm *hp, *hp_end;
- ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
-
- hp = &hfrag->mem[0];
- hp_end = &hfrag->mem[hfrag->used_size];
-
- if (any_heap_refs(hp, hp_end, literals, lit_bsize))
- goto try_literal_gc;
- }
- }
-
- return am_false;
-
- try_literal_gc:
- need_gc |= ERTS_LITERAL_GC__;
-
- try_gc:
- need_gc |= ERTS_ORDINARY_GC__;
-
- if ((done_gc & need_gc) == need_gc)
- return am_true;
-
- if (!(flags & ERTS_CPC_ALLOW_GC))
- return am_aborted;
-
- need_gc &= ~done_gc;
-
- /*
- * Try to get rid of literals by by garbage collecting.
- * Clear both fvalue and ftrace.
- */
-
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
-
- if (need_gc & ERTS_ORDINARY_GC__) {
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls);
- done_gc |= ERTS_ORDINARY_GC__;
- }
- if (need_gc & ERTS_LITERAL_GC__) {
- struct erl_off_heap_header* oh;
- oh = modp->old.code_hdr->literal_area->off_heap;
- *redsp += lit_bsize / 64; /* Need, better value... */
- erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh);
- done_gc |= ERTS_LITERAL_GC__;
- }
- need_gc = 0;
- }
-
-#undef ERTS_ORDINARY_GC__
-#undef ERTS_LITERAL_GC__
-
+ return am_false;
}
-#endif /* !ERTS_NEW_PURGE_STRATEGY */
-
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
{
@@ -1503,9 +1328,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
}
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
-
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp later_literal_area_switch;
@@ -1527,28 +1349,22 @@ static void
complete_literal_area_switch(void *literal_area)
{
Process *p = erts_literal_area_collector;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
erts_resume(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
if (literal_area)
erts_release_literal_area((ErtsLiteralArea *) literal_area);
}
-#endif
-
-#endif /* ERTS_NEW_PURGE_STRATEGY */
BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
{
-#ifndef ERTS_NEW_PURGE_STRATEGY
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#else
ErtsLiteralArea *unused_la;
ErtsLiteralAreaRef *la_ref;
if (BIF_P != erts_literal_area_collector)
BIF_ERROR(BIF_P, EXC_NOTSUP);
- erts_smp_mtx_lock(&release_literal_areas.mtx);
+ erts_mtx_lock(&release_literal_areas.mtx);
la_ref = release_literal_areas.first;
if (la_ref) {
@@ -1557,14 +1373,13 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
release_literal_areas.last = NULL;
}
- erts_smp_mtx_unlock(&release_literal_areas.mtx);
+ erts_mtx_unlock(&release_literal_areas.mtx);
unused_la = ERTS_COPY_LITERAL_AREA();
if (!la_ref) {
ERTS_SET_COPY_LITERAL_AREA(NULL);
if (unused_la) {
-#ifdef ERTS_SMP
ErtsLaterReleasLiteralArea *lrlap;
lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA,
sizeof(ErtsLaterReleasLiteralArea));
@@ -1578,9 +1393,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
+ ((unused_la->end
- &unused_la->start[0])
- 1)*(sizeof(Eterm))));
-#else
- erts_release_literal_area(unused_la);
-#endif
}
BIF_RET(am_false);
}
@@ -1589,18 +1401,12 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
erts_free(ERTS_ALC_T_LITERAL_REF, la_ref);
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_op(complete_literal_area_switch,
unused_la,
&later_literal_area_switch);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
-#else
- erts_release_literal_area(unused_la);
- BIF_RET(am_true);
-#endif
-#endif /* ERTS_NEW_PURGE_STRATEGY */
}
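
The suspend-then-yield shape used here recurs throughout this file (the purge BIF below uses it twice, with resume_purger() and finalize_purge_abort() as the later ops). A condensed sketch of the pattern:

    /* Schedule a thread-progress callback that will resume this process,
     * suspend, and yield the eventual result to the caller. */
    erts_schedule_thr_prgr_later_op(callback, arg, &later_op_storage);
    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
    ERTS_BIF_YIELD_RETURN(BIF_P, result);

Here callback, arg, later_op_storage and result stand in for the per-site values; the calls themselves are the ones visible in the surrounding hunks.
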
void
@@ -1623,10 +1429,10 @@ erts_purge_state_add_fun(ErlFunEntry *fe)
}
Export *
-erts_suspend_process_on_pending_purge_lambda(Process *c_p)
+erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe)
{
- erts_smp_mtx_lock(&purge_state.mtx);
- if (is_value(purge_state.module)) {
+ erts_mtx_lock(&purge_state.mtx);
+ if (purge_state.module == fe->module) {
/*
* The process c_p is about to call a fun in the code
* that we are trying to purge. Suspend it and call
@@ -1651,7 +1457,7 @@ erts_suspend_process_on_pending_purge_lambda(Process *c_p)
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_VBUMP_ALL_REDS(c_p);
}
- erts_smp_mtx_unlock(&purge_state.mtx);
+ erts_mtx_unlock(&purge_state.mtx);
return purge_state.pending_purge_lambda;
}
@@ -1661,9 +1467,9 @@ finalize_purge_operation(Process *c_p, int succeded)
Uint ix;
if (c_p)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_mtx_lock(&purge_state.mtx);
+ erts_mtx_lock(&purge_state.mtx);
ASSERT(purge_state.module != THE_NON_VALUE);
@@ -1679,14 +1485,14 @@ finalize_purge_operation(Process *c_p, int succeded)
ERTS_PROC_LOCK_STATUS);
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
}
}
- erts_smp_mtx_unlock(&purge_state.mtx);
+ erts_mtx_unlock(&purge_state.mtx);
if (c_p)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (purge_state.sprocs != &purge_state.def_sprocs[0]) {
erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
@@ -1705,7 +1511,6 @@ finalize_purge_operation(Process *c_p, int succeded)
purge_state.fe_ix = 0;
}
-#ifdef ERTS_SMP
static ErtsThrPrgrLaterOp purger_lop_data;
@@ -1713,9 +1518,9 @@ static void
resume_purger(void *unused)
{
Process *p = erts_code_purger;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
erts_resume(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
static void
@@ -1728,7 +1533,6 @@ finalize_purge_abort(void *unused)
resume_purger(NULL);
}
-#endif /* ERTS_SMP */
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
@@ -1787,17 +1591,13 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
else {
BeamInstr* code;
BeamInstr* end;
- erts_smp_mtx_lock(&purge_state.mtx);
+ erts_mtx_lock(&purge_state.mtx);
purge_state.module = BIF_ARG_1;
- erts_smp_mtx_unlock(&purge_state.mtx);
+ erts_mtx_unlock(&purge_state.mtx);
res = am_true;
code = (BeamInstr*) modp->old.code_hdr;
end = (BeamInstr *)((char *)code + modp->old.code_length);
erts_fun_purge_prepare(code, end);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(!ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(modp->old.code_hdr->literal_area);
-#endif
}
if (BIF_ARG_2 == am_prepare_on_load) {
@@ -1807,9 +1607,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
-#ifndef ERTS_SMP
- BIF_RET(res);
-#else
if (res != am_true)
BIF_RET(res);
else {
@@ -1828,7 +1625,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
-#endif
}
case am_abort: {
@@ -1842,15 +1638,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- ASSERT(ERTS_COPY_LITERAL_AREA());
- ERTS_SET_COPY_LITERAL_AREA(NULL);
-#endif
-#ifndef ERTS_SMP
- erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
- finalize_purge_operation(BIF_P, 0);
- BIF_RET(am_false);
-#else
/*
* We need to restore the code addresses of the funs in
* two stages in order to ensure that we do not get any
@@ -1866,7 +1653,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
&purger_lop_data);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
-#endif
}
case am_complete: {
@@ -1914,15 +1700,18 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
/*
* Unload any NIF library
*/
- if (modp->old.nif != NULL) {
+ if (modp->old.nif != NULL
+ || IF_HIPE(hipe_purge_need_blocking(modp))) {
/* ToDo: Do unload nif without blocking */
erts_rwunlock_old_code(code_ix);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
is_blocking = 1;
erts_rwlock_old_code(code_ix);
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+ if (modp->old.nif) {
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
}
/*
@@ -1941,7 +1730,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
modp->old.code_length = 0;
modp->old.catches = BEAM_CATCHES_NIL;
erts_remove_from_ranges(code);
-
+#ifdef HIPE
+ hipe_purge_module(modp, is_blocking);
+#endif
ERTS_BIF_PREP_RET(ret, am_true);
}
@@ -1952,47 +1743,18 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_rwunlock_old_code(code_ix);
}
if (is_blocking) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
erts_release_code_write_permission();
finalize_purge_operation(BIF_P, ret == am_true);
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
-
- ASSERT(ERTS_COPY_LITERAL_AREA() == literals);
- ERTS_SET_COPY_LITERAL_AREA(NULL);
- erts_release_literal_area(literals);
-
-#else /* ERTS_NEW_PURGE_STRATEGY */
-
if (literals) {
- ErtsLiteralAreaRef *ref;
- ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
- sizeof(ErtsLiteralAreaRef));
- ref->literal_area = literals;
- ref->next = NULL;
- erts_smp_mtx_lock(&release_literal_areas.mtx);
- if (release_literal_areas.last) {
- release_literal_areas.last->next = ref;
- release_literal_areas.last = ref;
- }
- else {
- release_literal_areas.first = ref;
- release_literal_areas.last = ref;
- }
- erts_smp_mtx_unlock(&release_literal_areas.mtx);
- erts_queue_message(erts_literal_area_collector,
- 0,
- erts_alloc_message(0, NULL),
- am_copy_literals,
- BIF_P->common.id);
+ erts_queue_release_literals(BIF_P, literals);
}
-#endif /* ERTS_NEW_PURGE_STRATEGY */
-
return ret;
}
@@ -2002,6 +1764,41 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
+void
+erts_queue_release_literals(Process* c_p, ErtsLiteralArea* literals)
+{
+ ErtsLiteralAreaRef *ref;
+ ErtsMessage *mp;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ } else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_mtx_unlock(&release_literal_areas.mtx);
+ mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ if (c_p == NULL) {
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals,
+ am_system);
+ } else {
+ erts_queue_proc_message(c_p,
+ erts_literal_area_collector,
+ 0,
+ mp,
+ am_copy_literals);
+ }
+}
+
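
Both intended call shapes of the new function, the first visible in the purge hunk above and the second assumed for contexts that have no calling process:

    /* From a process context, as in erts_internal_purge_module_2/2: */
    if (literals)
        erts_queue_release_literals(BIF_P, literals);

    /* From a system context with no calling process (assumed usage): */
    erts_queue_release_literals(NULL, literals);
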
/*
* Move code from current to old and null all export entries for the module
*/
@@ -2011,38 +1808,39 @@ delete_code(Module* modp)
{
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = make_atom(modp->module);
- int i;
+ int i, num_exps = export_list_size(code_ix);
- for (i = 0; i < export_list_size(code_ix); i++) {
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep != NULL && (ep->code[0] == module)) {
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep != NULL && (ep->info.mfa.module == module)) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
continue;
}
- else if (ep->code[3] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- erts_clear_export_break(modp, ep->code+3);
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export trace cleared, code_ix=%d", code_ix);
+ erts_clear_export_break(modp, &ep->info);
}
- else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
- || !erts_initialized);
- }
- ep->addressv[code_ix] = ep->code+3;
- ep->code[3] = (BeamInstr) em_call_error_handler;
- ep->code[4] = 0;
+ else {
+ ASSERT(BeamIsOpCode(ep->beam[0], op_call_error_handler) ||
+ !erts_initialized);
+ }
+ }
+ ep->addressv[code_ix] = ep->beam;
+ ep->beam[0] = BeamOpCodeAddr(op_call_error_handler);
+ ep->beam[1] = 0;
+ DBG_TRACE_MFA_P(&ep->info.mfa,
+ "export invalidation, code_ix=%d", code_ix);
}
}
ASSERT(modp->curr.num_breakpoints == 0);
ASSERT(modp->curr.num_traced_exports == 0);
modp->old = modp->curr;
- modp->curr.code_hdr = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- modp->curr.nif = NULL;
-
+ erts_module_instance_init(&modp->curr);
}
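
delete_code() now resets the current instance through erts_module_instance_init(). Assuming that helper restores the same empty state the deleted assignments produced, its effect is roughly:

    /* Presumed effect of erts_module_instance_init(&modp->curr); the real
     * initializer may also clear fields this sketch does not know about
     * (e.g. HiPE code). */
    modp->curr.code_hdr = NULL;
    modp->curr.code_length = 0;
    modp->curr.catches = BEAM_CATCHES_NIL;
    modp->curr.nif = NULL;
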
@@ -2056,9 +1854,11 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->curr.code_hdr && modp->old.code_hdr) {
- return am_not_purged;
- } else if (!modp->old.code_hdr) { /* Make the current version old. */
+ if (modp->curr.code_hdr) {
+ if (modp->old.code_hdr) {
+ return am_not_purged;
+ }
+ /* Make the current version old. */
delete_code(modp);
}
return NIL;
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 920c8b1ed0..0832b3f374 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,6 +32,7 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_term.h"
+#include "erl_nfunc_sched.h"
/* *************************************************************************
** Macros
@@ -45,15 +46,15 @@
#define ReAlloc(P, SZ)  erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+# define ERTS_REQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
__FILE__, __LINE__)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+# define ERTS_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif
#define ERTS_BPF_LOCAL_TRACE 0x01
@@ -72,8 +73,9 @@ extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */
extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-erts_smp_atomic32_t erts_active_bp_index;
-erts_smp_atomic32_t erts_staging_bp_index;
+erts_atomic32_t erts_active_bp_index;
+erts_atomic32_t erts_staging_bp_index;
+erts_mtx_t erts_dirty_bp_ix_mtx;
/*
* Inlined helpers
@@ -85,6 +87,27 @@ get_mtime(Process *c_p)
return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
+static ERTS_INLINE Uint32
+acquire_bp_sched_ix(Process *c_p)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&erts_dirty_bp_ix_mtx);
+ return (Uint32) erts_no_schedulers;
+ }
+ return (Uint32) esdp->no - 1;
+}
+
+static ERTS_INLINE void
+release_bp_sched_ix(Uint32 ix)
+{
+ if (ix == (Uint32) erts_no_schedulers)
+ erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
+}
+
+
+
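The acquire/release pair above gives each normal scheduler a private slot in the breakpoint-time hash and funnels all dirty schedulers through one extra, mutex-protected slot at index erts_no_schedulers, which is why bdt->n grows to erts_no_schedulers + 1 at the end of this diff. A usage sketch matching the call sites below:

    /* Every time-trace update brackets its hash access like this: */
    Uint32 six = acquire_bp_sched_ix(c_p); /* 0..N-1, or N when dirty (locked) */
    bp_time_hash_t *h = &bdt->hash[six];
    /* ... look up / update h->item ... */
    release_bp_sched_ix(six);              /* unlocks only the dirty slot */
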
/* *************************************************************************
** Local prototypes
*/
@@ -92,27 +115,27 @@ get_mtime(Process *c_p)
/*
** Helpers
*/
-static ErtsTracer do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+static ErtsTracer do_call_trace(Process* c_p, ErtsCodeInfo *info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer);
-static void set_function_break(BeamInstr *pc,
+static void set_function_break(ErtsCodeInfo *ci,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
-static int clear_function_break(BeamInstr *pc, Uint break_flags);
+static int clear_function_break(ErtsCodeInfo *ci, Uint break_flags);
-static BpDataTime* get_time_break(BeamInstr *pc);
-static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static BpDataTime* get_time_break(ErtsCodeInfo *ci);
+static GenericBpData* check_break(ErtsCodeInfo *ci, Uint break_flags);
-static void bp_meta_unref(BpMetaTracer* bmt);
-static void bp_count_unref(BpCount* bcp);
-static void bp_time_unref(BpDataTime* bdt);
-static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
-static void uninstall_breakpoint(BeamInstr* pc);
+static void bp_meta_unref(BpMetaTracer *bmt);
+static void bp_count_unref(BpCount *bcp);
+static void bp_time_unref(BpDataTime *bdt);
+static void consolidate_bp_data(Module *modp, ErtsCodeInfo *ci, int local);
+static void uninstall_breakpoint(ErtsCodeInfo *ci);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -133,13 +156,15 @@ static void bp_hash_delete(bp_time_hash_t *hash);
void
erts_bp_init(void) {
- erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
- erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
+ erts_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_atomic32_init_nob(&erts_staging_bp_index, 1);
+ erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
}
void
-erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
Uint max_funcs = 0;
@@ -164,41 +189,44 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
i = 0;
for (current = 0; current < num_modules; current++) {
BeamCodeHeader* code_hdr = module[current]->curr.code_hdr;
- BeamInstr* code;
+ ErtsCodeInfo* ci;
Uint num_functions = (Uint)(UWord) code_hdr->num_functions;
Uint fi;
if (specified > 0) {
- if (mfa[0] != make_atom(module[current]->module)) {
+ if (mfa->module != make_atom(module[current]->module)) {
/* Wrong module name */
continue;
}
}
for (fi = 0; fi < num_functions; fi++) {
- BeamInstr* pc;
- int wi;
- code = code_hdr->functions[fi];
- ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- pc = code+5;
- if (erts_is_native_break(pc)) {
+ ci = code_hdr->functions[fi];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
continue;
}
- if (is_nil(code[3])) { /* Ignore BIF stub */
+ if (is_nil(ci->mfa.module)) { /* Ignore BIF stub */
continue;
}
- for (wi = 0;
- wi < specified && (Eterm) code[2+wi] == mfa[wi];
- wi++) {
- /* Empty loop body */
- }
- if (wi == specified) {
- /* Store match */
- f->matching[i].pc = pc;
- f->matching[i].mod = module[current];
- i++;
- }
+ switch (specified) {
+ case 3:
+ if (ci->mfa.arity != mfa->arity)
+ continue;
+ case 2:
+ if (ci->mfa.function != mfa->function)
+ continue;
+ case 1:
+ if (ci->mfa.module != mfa->module)
+ continue;
+ case 0:
+ break;
+ }
+ /* Store match */
+ f->matching[i].ci = ci;
+ f->matching[i].mod = module[current];
+ i++;
}
}
f->matched = i;
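
The new switch relies on deliberate fall-through, so specified == 3 checks arity, function and module; 2 checks function and module; 1 checks module only. An equivalent predicate, written out as a hypothetical helper:

    /* Sketch: equivalent to the fall-through switch above. */
    static int
    mfa_matches(const ErtsCodeMFA *ci_mfa, const ErtsCodeMFA *pat, int specified)
    {
        return (specified < 3 || ci_mfa->arity    == pat->arity)
            && (specified < 2 || ci_mfa->function == pat->function)
            && (specified < 1 || ci_mfa->module   == pat->module);
    }
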
@@ -206,7 +234,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
void
-erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified)
{
ErtsCodeIndex code_ix = erts_active_code_ix();
int i;
@@ -218,27 +246,36 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i, code_ix);
BeamInstr* pc;
- int j;
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j < specified) {
- continue;
- }
- pc = ep->code+3;
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ pc = ep->beam;
if (ep->addressv[code_ix] == pc) {
- if ((*pc == (BeamInstr) em_apply_bif ||
- *pc == (BeamInstr) em_call_error_handler)) {
- continue;
+ if (BeamIsOpCode(*pc, op_apply_bif) ||
+ BeamIsOpCode(*pc, op_call_error_handler)) {
+ continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
- } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ ASSERT(BeamIsOpCode(*pc, op_i_generic_breakpoint));
+ } else if (erts_is_function_native(erts_code_to_codeinfo(ep->addressv[code_ix]))) {
continue;
}
- f->matching[ne].pc = pc;
- f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ f->matching[ne].ci = &ep->info;
+ f->matching[ne].mod = erts_get_module(ep->info.mfa.module, code_ix);
ne++;
}
@@ -261,10 +298,10 @@ erts_consolidate_bp_data(BpFunctions* f, int local)
Uint i;
Uint n = f->matched;
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < n; i++) {
- consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ consolidate_bp_data(fs[i].mod, fs[i].ci, local);
}
}
@@ -273,17 +310,17 @@ erts_consolidate_bif_bp_data(void)
{
int i;
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
for (i = 0; i < BIF_SIZE; i++) {
Export *ep = bif_export[i];
- consolidate_bp_data(0, ep->code+3, 0);
+ consolidate_bp_data(0, &ep->info, 0);
}
}
static void
-consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
GenericBpData* src;
GenericBpData* dst;
Uint flags;
@@ -329,9 +366,10 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
}
ASSERT(modp->curr.num_breakpoints >= 0);
ASSERT(modp->curr.num_traced_exports >= 0);
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(! BeamIsOpCode(*erts_codeinfo_to_code(ci),
+ op_i_generic_breakpoint));
}
- pc[-4] = 0;
+ ci->u.gen_bp = NULL;
Free(g);
return;
}
@@ -368,8 +406,8 @@ erts_commit_staged_bp(void)
ErtsBpIndex staging = erts_staging_bp_ix();
ErtsBpIndex active = erts_active_bp_ix();
- erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
- erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
+ erts_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_atomic32_set_nob(&erts_staging_bp_index, active);
}
void
@@ -377,12 +415,15 @@ erts_install_breakpoints(BpFunctions* f)
{
Uint i;
Uint n = f->matched;
- BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ BeamInstr br = BeamOpCodeAddr(op_i_generic_breakpoint);
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- GenericBp* g = (GenericBp *) pc[-4];
- if (*pc != br && g) {
+ ErtsCodeInfo* ci = f->matching[i].ci;
+ GenericBp* g = ci->u.gen_bp;
+ BeamInstr volatile *pc = erts_codeinfo_to_code(ci);
+ BeamInstr instr = *pc;
+
+ if (!BeamIsOpCode(instr, op_i_generic_breakpoint) && g) {
Module* modp = f->matching[i].mod;
/*
@@ -396,11 +437,16 @@ erts_install_breakpoints(BpFunctions* f)
/*
* The following write is not protected by any lock. We
* assume that the hardware guarantees that a write of an
- * aligned word-size (or half-word) writes is atomic
- * (i.e. that other processes executing this code will not
- * see a half pointer).
+ * aligned word-size writes is atomic (i.e. that other
+ * processes executing this code will not see a half
+ * pointer).
+ *
+ * The contents of *pc is marked 'volatile' to ensure that
+ * the compiler will do a single full-word write, and not
+ * try any fancy optimizations to write a half word.
*/
- *pc = br;
+ instr = BeamSetCodeAddr(instr, br);
+ *pc = instr;
modp->curr.num_breakpoints++;
}
}
@@ -413,16 +459,16 @@ erts_uninstall_breakpoints(BpFunctions* f)
Uint n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- uninstall_breakpoint(pc);
+ uninstall_breakpoint(f->matching[i].ci);
}
}
static void
-uninstall_breakpoint(BeamInstr* pc)
+uninstall_breakpoint(ErtsCodeInfo *ci)
{
- if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- GenericBp* g = (GenericBp *) pc[-4];
+ BeamInstr *pc = erts_codeinfo_to_code(ci);
+ if (BeamIsOpCode(*pc, op_i_generic_breakpoint)) {
+ GenericBp* g = ci->u.gen_bp;
if (g->data[erts_active_bp_ix()].flags == 0) {
/*
* The following write is not protected by any lock. We
@@ -449,30 +495,30 @@ erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
}
void
-erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, erts_tracer_nil);
+ set_function_break(ci, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, ErtsTracer tracer)
+erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
+ set_function_break(ci, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
-erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op count_op)
{
- set_function_break(pc, NULL,
+ set_function_break(ci, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
count_op, erts_tracer_nil);
}
void
-erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+erts_clear_time_trace_bif(ErtsCodeInfo *ci) {
+ clear_function_break(ci, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}
void
@@ -501,14 +547,14 @@ erts_clear_trace_break(BpFunctions* f)
}
void
-erts_clear_call_trace_bif(BeamInstr *pc, int local)
+erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
if (g) {
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
if (g->data[erts_staging_bp_ix()].flags & flags) {
- clear_function_break(pc, flags);
+ clear_function_break(ci, flags);
}
}
}
@@ -520,15 +566,15 @@ erts_clear_mtrace_break(BpFunctions* f)
}
void
-erts_clear_mtrace_bif(BeamInstr *pc)
+erts_clear_mtrace_bif(ErtsCodeInfo *ci)
{
- clear_function_break(pc, ERTS_BPF_META_TRACE);
+ clear_function_break(ci, ERTS_BPF_META_TRACE);
}
void
erts_clear_debug_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_DEBUG);
}
@@ -556,7 +602,7 @@ erts_clear_module_break(Module *modp) {
Uint n;
Uint i;
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(modp);
code_hdr = modp->curr.code_hdr;
if (!code_hdr) {
@@ -564,52 +610,91 @@ erts_clear_module_break(Module *modp) {
}
n = (Uint)(UWord) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
}
erts_commit_staged_bp();
for (i = 0; i < n; ++i) {
- BeamInstr* pc;
-
- pc = code_hdr->functions[i] + 5;
- if (erts_is_native_break(pc)) {
+ ErtsCodeInfo *ci = code_hdr->functions[i];
+ if (erts_is_function_native(ci))
continue;
- }
- uninstall_breakpoint(pc);
- consolidate_bp_data(modp, pc, 1);
- ASSERT(pc[-4] == 0);
+ uninstall_breakpoint(ci);
+ consolidate_bp_data(modp, ci, 1);
+ ASSERT(ci->u.gen_bp == NULL);
}
return n;
}
void
-erts_clear_export_break(Module* modp, BeamInstr* pc)
+erts_clear_export_break(Module* modp, ErtsCodeInfo *ci)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
- clear_function_break(pc, ERTS_BPF_ALL);
+ clear_function_break(ci, ERTS_BPF_ALL);
erts_commit_staged_bp();
- *pc = (BeamInstr) 0;
- consolidate_bp_data(modp, pc, 0);
- ASSERT(pc[-4] == 0);
+ *erts_codeinfo_to_code(ci) = (BeamInstr) 0;
+ consolidate_bp_data(modp, ci, 0);
+ ASSERT(ci->u.gen_bp == NULL);
+}
+
+/*
+ * If c_p->cp is a trace return instruction, we set cp
+ * to be the place where we again start to execute code.
+ *
+ * cp is used by match spec {caller} to get the calling
+ * function, and if we don't do this fixup it will be
+ * 'undefined'. This has the odd side effect that {caller}
+ * does not really identify the calling function, but
+ * rather the function we are about to return to.
+ */
+static void fixup_cp_before_trace(Process *c_p, int *return_to_trace)
+{
+ Eterm *cpp, *E = c_p->stop;
+ BeamInstr w = *c_p->cp;
+ if (BeamIsOpCode(w, op_return_trace)) {
+ cpp = &E[2];
+ } else if (BeamIsOpCode(w, op_i_return_to_trace)) {
+ *return_to_trace = 1;
+ cpp = &E[0];
+ } else if (BeamIsOpCode(w, op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (BeamIsOpCode(w, op_return_trace)) {
+ cpp += 3;
+ } else if (BeamIsOpCode(w, op_i_return_to_trace)) {
+ *return_to_trace = 1;
+ cpp += 1;
+ } else if (BeamIsOpCode(w, op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
}
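
The increments 3, 1 and 2 above correspond to the number of stack words each trace-return instruction leaves behind. For return_trace the layout can be read off do_call_trace() further down in this diff; the other two are assumed analogous:

    /* Stack words skipped per trace-return frame (sketch):
     *   op_return_trace:        E[0] = CP to func info, E[1] = tracer,
     *                           E[2] = original return address -> cpp += 3
     *   op_i_return_to_trace:   one word, the original return  -> cpp += 1
     *   op_i_return_time_trace: two words                      -> cpp += 2
     */
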
BeamInstr
-erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
{
GenericBp* g;
GenericBpData* bp;
Uint bp_flags;
ErtsBpIndex ix = erts_active_bp_ix();
- g = (GenericBp *) I[-4];
+ ASSERT(BeamIsOpCode(info->op, op_i_func_info_IaaI));
+
+ g = info->u.gen_bp;
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
@@ -628,19 +713,20 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
+ (void) do_call_trace(c_p, info, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
ErtsTracer old_tracer, new_tracer;
- old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+ old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);
+
+ new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer);
- new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
- if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ if (old_tracer == erts_atomic_cmpxchg_acqb(
&bp->meta_tracer->tracer,
(erts_aint_t)new_tracer,
(erts_aint_t)old_tracer)) {
@@ -652,20 +738,20 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
}
if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
- erts_smp_atomic_inc_nob(&bp->count->acount);
+ erts_atomic_inc_nob(&bp->count->acount);
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
- erts_trace_time_call(c_p, I, bp->time);
+ erts_trace_time_call(c_p, info, bp->time);
w = (BeamInstr) *c_p->cp;
- if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
- w == (BeamInstr) BeamOp(op_return_trace) ||
- w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ if (! (BeamIsOpCode(w, op_i_return_time_trace) ||
+ BeamIsOpCode(w, op_return_trace) ||
+ BeamIsOpCode(w, op_i_return_to_trace)) ) {
Eterm* E = c_p->stop;
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - 2 < c_p->htop) {
- (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, 2, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
}
E = c_p->stop;
@@ -673,7 +759,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
ASSERT(c_p->htop <= E && E <= c_p->hend);
E -= 2;
- E[0] = make_cp(I);
+ E[0] = make_cp(erts_codeinfo_to_code(info));
E[1] = make_cp(c_p->cp); /* original return address */
c_p->cp = beam_return_time_trace;
c_p->stop = E;
@@ -681,7 +767,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
}
if (bp_flags & ERTS_BPF_DEBUG) {
- return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ return BeamOpCodeAddr(op_i_debug_breakpoint);
} else {
return g->orig_instr;
}
@@ -701,17 +787,18 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
ErtsTracer meta_tracer = erts_tracer_nil;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
+ int applying = (I == ep->beam); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
BeamInstr *cp = p->cp;
GenericBp* g;
GenericBpData* bp = NULL;
Uint bp_flags = 0;
+ int return_to_trace = 0;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ g = ep->info.u.gen_bp;
if (g) {
bp = &g->data[erts_active_bp_ix()];
bp_flags = bp->flags;
@@ -723,25 +810,27 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
*/
if (!applying) {
p->cp = I;
+ } else {
+ fixup_cp_before_trace(p, &return_to_trace);
}
if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
- flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
ErtsTracer old_tracer;
- meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+ meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer);
old_tracer = meta_tracer;
- flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
0, &meta_tracer);
if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
ErtsTracer new_tracer = erts_tracer_nil;
erts_tracer_update(&new_tracer, meta_tracer);
- if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ if (old_tracer == erts_atomic_cmpxchg_acqb(
&bp->meta_tracer->tracer,
(erts_aint_t)new_tracer,
(erts_aint_t)old_tracer)) {
@@ -753,8 +842,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
IS_TRACED_FL(p, F_TRACE_CALLS)) {
- BeamInstr *pc = (BeamInstr *)ep->code+3;
- erts_trace_time_call(p, pc, bp->time);
+ erts_trace_time_call(p, &ep->info, bp->time);
}
/* Restore original continuation pointer (if changed). */
@@ -764,6 +852,30 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
result = func(p, args, I);
+ if (erts_nif_export_check_save_trace(p, result,
+ applying, ep,
+ cp, flags,
+ flags_meta, I,
+ meta_tracer)) {
+ /*
+ * erts_bif_trace_epilogue() will be called
+ * later when appropriate via the NIF export
+ * scheduling functionality...
+ */
+ return result;
+ }
+
+ return erts_bif_trace_epilogue(p, result, applying, ep, cp,
+ flags, flags_meta, I,
+ meta_tracer);
+}
+
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
@@ -817,11 +929,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
class = exception_tag[GET_EXC_CLASS(reason)];
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
+ erts_trace_exception(p, &ep->info.mfa, class, value,
&ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
@@ -845,18 +957,18 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer);
+ erts_trace_return(p, &ep->info.mfa, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER(p));
+ erts_trace_return(p, &ep->info.mfa, result, &ERTS_TRACER(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE &&
IS_TRACED_FL(p, F_TRACE_RETURN_TO)) {
@@ -870,57 +982,28 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
}
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
return result;
}
static ErtsTracer
-do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg,
int local, Binary* ms, ErtsTracer tracer)
{
- Eterm* cpp;
int return_to_trace = 0;
- BeamInstr w;
BeamInstr *cp_save = c_p->cp;
Uint32 flags;
Uint need = 0;
Eterm* E = c_p->stop;
- w = *c_p->cp;
- if (w == (BeamInstr) BeamOp(op_return_trace)) {
- cpp = &E[2];
- } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
- return_to_trace = 1;
- cpp = &E[0];
- } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- for (;;) {
- BeamInstr w = *cp_val(*cpp);
- if (w == (BeamInstr) BeamOp(op_return_trace)) {
- cpp += 3;
- } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
- return_to_trace = 1;
- cpp += 1;
- } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
- cpp += 2;
- } else {
- break;
- }
- }
- cp_save = c_p->cp;
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- }
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- if (cpp) {
- c_p->cp = cp_save;
- }
+ fixup_cp_before_trace(c_p, &return_to_trace);
+
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, info, ms, reg, local, &tracer);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+
+ /* restore cp after potential fixup */
+ c_p->cp = cp_save;
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
@@ -932,7 +1015,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
if (E - need < c_p->htop) {
- (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ (void) erts_garbage_collect(c_p, need, reg, info->mfa.arity);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
E = c_p->stop;
}
@@ -948,39 +1031,40 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E -= 3;
c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(is_CP((Eterm) (UWord) (&info->mfa.module)));
ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
E[1] = copy_object(tracer, c_p);
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
+ E[0] = make_cp(&info->mfa.module);
+ /* We ARE at the beginning of an instruction,
	   the funcinfo is above I. */
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
} else
c_p->stop = E;
return tracer;
}
void
-erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(c_p);
ASSERT(c_p);
- ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
+ ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
-
+
pbt = ERTS_PROC_GET_CALL_TIME(c_p);
time = get_mtime(c_p);
@@ -995,18 +1079,18 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt = Alloc(sizeof(process_breakpoint_time_t));
(void) ERTS_PROC_SET_CALL_TIME(c_p, pbt);
} else {
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
/* add time to previous code */
sitem.time = time - pbt->time;
sitem.pid = c_p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* if null then the breakpoint was removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1027,7 +1111,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* this breakpoint */
ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(bdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1039,21 +1123,24 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BP_TIME_ADD(item, &sitem);
}
- pbt->pc = I;
+ pbt->ci = info;
pbt->time = time;
+
+ release_bp_sched_ix(six);
}
void
-erts_trace_time_return(Process *p, BeamInstr *pc)
+erts_trace_time_return(Process *p, ErtsCodeInfo *ci)
{
ErtsMonotonicTime time;
process_breakpoint_time_t *pbt = NULL;
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
- ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
+ ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
@@ -1071,21 +1158,23 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
*/
if (pbt) {
+
/* might have been removed due to
* trace_pattern(false)
*/
- ASSERT(pbt->pc);
+ ASSERT(pbt->ci);
sitem.time = time - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
/* previous breakpoint */
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
/* beware, the trace_pattern might have been removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1096,18 +1185,22 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
} else {
BP_TIME_ADD(item, &sitem);
}
+
}
- pbt->pc = pc;
+ pbt->ci = ci;
pbt->time = time;
+
}
+
+ release_bp_sched_ix(six);
}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- GenericBpData* bp = check_break(pc, flags);
+ GenericBpData* bp = check_break(ci, flags);
if (bp) {
if (match_spec_ret) {
@@ -1119,55 +1212,44 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_META_TRACE);
if (bp) {
if (match_spec_ret) {
*match_spec_ret = bp->meta_ms;
}
if (tracer_ret) {
- *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+ *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer);
}
return 1;
}
return 0;
}
-int
-erts_is_native_break(BeamInstr *pc) {
-#ifdef HIPE
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- return pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call)
- || pc[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure);
-#else
- return 0;
-#endif
-}
-
int
-erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_COUNT);
if (bp) {
if (count_ret) {
- *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
+ *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount);
}
return 1;
}
return 0;
}
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *retval) {
Uint i, ix;
bp_time_hash_t hash;
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = get_time_break(pc);
+ BpDataTime *bdt = get_time_break(ci);
if (bdt) {
if (retval) {
@@ -1221,26 +1303,25 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
-BeamInstr *
-erts_find_local_func(Eterm mfa[3]) {
+ErtsCodeInfo *
+erts_find_local_func(ErtsCodeMFA *mfa) {
Module *modp;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr;
+ ErtsCodeInfo* ci;
Uint i,n;
- if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
+ if ((modp = erts_get_module(mfa->module, erts_active_code_ix())) == NULL)
return NULL;
if ((code_hdr = modp->curr.code_hdr) == NULL)
return NULL;
n = (BeamInstr) code_hdr->num_functions;
for (i = 0; i < n; ++i) {
- code_ptr = code_hdr->functions[i];
- ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
- is_nil((Eterm) code_ptr[2]));
- if (mfa[1] == ((Eterm) code_ptr[3]) &&
- ((BeamInstr) mfa[2]) == code_ptr[4]) {
- return code_ptr + 5;
+ ci = code_hdr->functions[i];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module));
+ if (mfa->function == ci->mfa.function &&
+ mfa->arity == ci->mfa.arity) {
+ return ci;
}
}
return NULL;
@@ -1353,6 +1434,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
@@ -1369,13 +1451,13 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = get_time_break(pbt->pc);
+ pbdt = get_time_break(pbt->ci);
if (pbdt) {
sitem.time = get_mtime(p) - pbt->time;
sitem.pid = p->common.id;
sitem.count = 0;
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1401,6 +1483,8 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
break;
}
} /* pbt */
+
+ release_bp_sched_ix(six);
}
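The six index used in the time-trace hunks above comes from a pair of helpers defined earlier in beam_bp.c but not visible in this diff. A minimal sketch of how they presumably work, consistent with the extra hash slot allocated in set_function_break further down and with the erts_dirty_bp_ix_mtx declared in beam_bp.h:

static ERTS_INLINE Uint32 acquire_bp_sched_ix(Process *c_p)
{
    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    ASSERT(esdp);
    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
        /* All dirty schedulers share the one extra slot; a mutex is
         * needed since several of them may hit breakpoints at once. */
        erts_mtx_lock(&erts_dirty_bp_ix_mtx);
        return (Uint32) erts_no_schedulers;
    }
    return (Uint32) esdp->no - 1;  /* each normal scheduler owns a slot */
}

static ERTS_INLINE void release_bp_sched_ix(Uint32 ix)
{
    if (ix == (Uint32) erts_no_schedulers)
        erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
}

Under this scheme each normal scheduler indexes its hash slot lock-free, which is why bdt->n grows to erts_no_schedulers + 1 below.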
/* *************************************************************************
@@ -1417,14 +1501,14 @@ set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- set_function_break(pc, match_spec, break_flags,
+ set_function_break(f->matching[i].ci,
+ match_spec, break_flags,
count_op, tracer);
}
}
static void
-set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
@@ -1432,8 +1516,8 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- g = (GenericBp *) pc[-4];
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
+ g = ci->u.gen_bp;
if (g == 0) {
int i;
if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
@@ -1441,11 +1525,11 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
return;
}
g = Alloc(sizeof(GenericBp));
- g->orig_instr = *pc;
+ g->orig_instr = *erts_codeinfo_to_code(ci);
for (i = 0; i < ERTS_NUM_BP_IX; i++) {
g->data[i].flags = 0;
}
- pc[-4] = (BeamInstr) g;
+ ci->u.gen_bp = g;
}
bp = &g->data[ix];
@@ -1464,7 +1548,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
bp->flags |= ERTS_BPF_COUNT_ACTIVE;
- erts_smp_atomic_set_nob(&bp->count->acount, 0);
+ erts_atomic_set_nob(&bp->count->acount, 0);
}
ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
return;
@@ -1500,7 +1584,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
bmt = Alloc(sizeof(BpMetaTracer));
erts_refc_init(&bmt->refc, 1);
erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
- erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
+ erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
bp->meta_tracer = bmt;
} else if (break_flags & ERTS_BPF_COUNT) {
BpCount* bcp;
@@ -1508,7 +1592,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
bcp = Alloc(sizeof(BpCount));
erts_refc_init(&bcp->refc, 1);
- erts_smp_atomic_init_nob(&bcp->acount, 0);
+ erts_atomic_init_nob(&bcp->acount, 0);
bp->count = bcp;
} else if (break_flags & ERTS_BPF_TIME_TRACE) {
BpDataTime* bdt;
@@ -1517,7 +1601,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
erts_refc_init(&bdt->refc, 1);
- bdt->n = erts_no_schedulers;
+ bdt->n = erts_no_schedulers + 1;
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
@@ -1537,22 +1621,21 @@ clear_break(BpFunctions* f, Uint break_flags)
n = f->matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = f->matching[i].pc;
- clear_function_break(pc, break_flags);
+ clear_function_break(f->matching[i].ci, break_flags);
}
}
static int
-clear_function_break(BeamInstr *pc, Uint break_flags)
+clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
{
GenericBp* g;
GenericBpData* bp;
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
- if ((g = (GenericBp *) pc[-4]) == 0) {
+ if ((g = ci->u.gen_bp) == NULL) {
return 1;
}
@@ -1584,7 +1667,7 @@ static void
bp_meta_unref(BpMetaTracer* bmt)
{
if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
- ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer);
+ ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer);
ERTS_TRACER_CLEAR(&trc);
Free(bmt);
}
@@ -1625,7 +1708,7 @@ bp_time_unref(BpDataTime* bdt)
if (pbt) {
Free(pbt);
}
- erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
@@ -1638,19 +1721,19 @@ bp_time_unref(BpDataTime* bdt)
}
static BpDataTime*
-get_time_break(BeamInstr *pc)
+get_time_break(ErtsCodeInfo *ci)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ GenericBpData* bp = check_break(ci, ERTS_BPF_TIME_TRACE);
return bp ? bp->time : 0;
}
static GenericBpData*
-check_break(BeamInstr *pc, Uint break_flags)
+check_break(ErtsCodeInfo *ci, Uint break_flags)
{
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ci->u.gen_bp;
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (erts_is_native_break(pc)) {
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (erts_is_function_native(ci)) {
return 0;
}
if (g) {
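This file consistently replaces the pc[-4]/pc[-5] offset arithmetic with field accesses on ErtsCodeInfo. A sketch of the layout those accesses assume (the authoritative definition lives in another header and carries more union members):

typedef struct {
    Eterm module;    /* atom, was pc[-3] */
    Eterm function;  /* atom, was pc[-2] */
    Uint  arity;     /* was pc[-1] */
} ErtsCodeMFA;

typedef struct {
    BeamInstr op;    /* op_i_func_info_IaaI, was pc[-5] */
    union {
        struct generic_bp* gen_bp;  /* breakpoint data, was pc[-4] */
        /* other members elided */
    } u;
    ErtsCodeMFA mfa;
} ErtsCodeInfo;

erts_codeinfo_to_code(ci) is then just pointer arithmetic past this header to the first real instruction, the word that set_function_break saves in g->orig_instr.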
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 541af77211..a64765822b 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,16 +46,16 @@ typedef struct bp_data_time { /* Call time */
typedef struct {
ErtsMonotonicTime time;
- BeamInstr *pc;
+ ErtsCodeInfo *ci;
} process_breakpoint_time_t; /* used within psd */
typedef struct {
- erts_smp_atomic_t acount;
+ erts_atomic_t acount;
erts_refc_t refc;
} BpCount;
typedef struct {
- erts_smp_atomic_t tracer;
+ erts_atomic_t tracer;
erts_refc_t refc;
} BpMetaTracer;
@@ -79,11 +79,7 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->no - 1)
-#else
-#define bp_sched2ix_proc(p) (0)
-#endif
+extern erts_mtx_t erts_dirty_bp_ix_mtx;
enum erts_break_op{
ERTS_BREAK_NOP = 0, /* Must be false */
@@ -95,7 +91,7 @@ enum erts_break_op{
typedef Uint32 ErtsBpIndex;
typedef struct {
- BeamInstr* pc;
+ ErtsCodeInfo *ci;
Module* mod;
} BpFunction;
@@ -116,8 +112,8 @@ void erts_commit_staged_bp(void);
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
-void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
-void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
+void erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified);
void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
@@ -128,15 +124,15 @@ void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
-void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
-void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+void erts_set_call_trace_bif(ErtsCodeInfo *ci, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
-void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
+void erts_set_mtrace_bif(ErtsCodeInfo *ci, Binary *match_spec,
ErtsTracer tracer);
-void erts_clear_mtrace_bif(BeamInstr *pc);
+void erts_clear_mtrace_bif(ErtsCodeInfo *ci);
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
@@ -146,46 +142,46 @@ void erts_clear_count_break(BpFunctions *f);
void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-void erts_clear_export_break(Module *modp, BeamInstr* pc);
+void erts_clear_export_break(Module *modp, ErtsCodeInfo* ci);
-BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
-BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
+BeamInstr erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *ci, Eterm* reg);
+BeamInstr erts_trace_break(Process *p, ErtsCodeInfo *ci, Eterm *args,
Uint32 *ret_flags, ErtsTracer *tracer);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
-int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_trace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, int local);
+int erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
+int erts_is_mtrace_bif(ErtsCodeInfo *ci, Binary **match_spec_ret,
ErtsTracer *tracer_ret);
-int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
-int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+int erts_is_native_break(ErtsCodeInfo *ci);
+int erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret);
+int erts_is_time_break(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
-void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
-void erts_trace_time_return(Process* c_p, BeamInstr* pc);
+void erts_trace_time_call(Process* c_p, ErtsCodeInfo *ci, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, ErtsCodeInfo *ci);
void erts_schedule_time_break(Process *p, Uint out);
void erts_set_time_break(BpFunctions *f, enum erts_break_op);
void erts_clear_time_break(BpFunctions *f);
-int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
-void erts_clear_time_trace_bif(BeamInstr *pc);
+int erts_is_time_trace_bif(Process *p, ErtsCodeInfo *ci, Eterm *call_time);
+void erts_set_time_trace_bif(ErtsCodeInfo *ci, enum erts_break_op);
+void erts_clear_time_trace_bif(ErtsCodeInfo *ci);
-BeamInstr *erts_find_local_func(Eterm mfa[3]);
+ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-extern erts_smp_atomic32_t erts_active_bp_index;
-extern erts_smp_atomic32_t erts_staging_bp_index;
+extern erts_atomic32_t erts_active_bp_index;
+extern erts_atomic32_t erts_staging_bp_index;
ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
{
- return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+ return erts_atomic32_read_nob(&erts_active_bp_index);
}
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
{
- return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+ return erts_atomic32_read_nob(&erts_staging_bp_index);
}
#endif
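The atomic index pair above double-buffers breakpoint data: each GenericBp holds one GenericBpData per ErtsBpIndex, writers mutate the staging generation, and a commit flips the indices. A hedged sketch of the call sequence, using only names declared in this header (exact ordering as assumed here):

void sketch_enable_trace(BpFunctions *f, Binary *ms)
{
    /* the caller is assumed to hold code write permission */
    erts_set_trace_break(f, ms);  /* fills g->data[erts_staging_bp_ix()] */
    erts_commit_staged_bp();      /* atomically swaps active and staging */
    erts_install_breakpoints(f);  /* patch matched functions to hit the bp */
}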
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index a4ad3e7886..9633de2021 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,8 @@
#include "beam_bp.h"
#include "erl_binary.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
+#include "beam_catches.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -50,7 +52,11 @@
void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
-static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
+static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr);
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif);
+static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap);
+static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap);
+static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes);
BIF_RETTYPE
erts_debug_same_2(BIF_ALIST_2)
@@ -115,7 +121,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
Eterm MFA = BIF_ARG_1;
Eterm boolean = BIF_ARG_2;
Eterm* tp;
- Eterm mfa[3];
+ ErtsCodeMFA mfa;
int i;
int specified = 0;
Eterm res;
@@ -131,33 +137,34 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
if (*tp != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+
+ if (is_small(tp[3])) {
+ mfa.arity = signed_val(tp[3]);
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
- erts_bp_match_functions(&f, mfa, specified);
+ erts_bp_match_functions(&f, &mfa, specified);
if (boolean == am_true) {
erts_set_debug_break(&f);
erts_install_breakpoints(&f);
@@ -171,8 +178,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
res = make_small(f.matched);
erts_bp_free_matched_functions(&f);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
return res;
@@ -194,9 +201,9 @@ void debug_dump_code(BeamInstr *I, int num)
erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
instr = (BeamInstr) code_ptr[0];
for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
- if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
+ if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
- i, opc[i].sz-1, code_ptr+1) + 1;
+ i, opc[i].sz-1, code_ptr) + 1;
break;
}
}
@@ -221,11 +228,11 @@ erts_debug_instructions_0(BIF_ALIST_0)
Eterm res = NIL;
for (i = 0; i < num_instructions; i++) {
- needed += 2*strlen(opc[i].name);
+ needed += 2*sys_strlen(opc[i].name);
}
hp = HAlloc(BIF_P, needed);
for (i = num_instructions-1; i >= 0; i--) {
- Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, strlen(opc[i].name));
+ Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, sys_strlen(opc[i].name));
res = erts_bld_cons(&hp, 0, s, res);
}
return res;
@@ -241,9 +248,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
Eterm* tp;
Eterm bin;
Eterm mfa;
- BeamInstr* funcinfo = NULL; /* Initialized to eliminate warning. */
+ ErtsCodeMFA *cmfa = NULL;
BeamCodeHeader* code_hdr;
- BeamInstr* code_ptr = NULL; /* Initialized to eliminate warning. */
+ BeamInstr *code_ptr;
BeamInstr instr;
BeamInstr uaddr;
Uint hsz;
@@ -251,7 +258,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
if (term_to_UWord(addr, &uaddr)) {
code_ptr = (BeamInstr *) uaddr;
- if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
+ if ((cmfa = find_function_from_pc(code_ptr)) == NULL) {
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
@@ -282,24 +289,22 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* such as erts_debug:apply/4. Then search for it in the module.
*/
if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
- /* XXX: add "&& ep->address != ep->code+3" condition?
+ /* XXX: add "&& ep->address != ep->code" condition?
* Consider a traced function.
- * Its ep will have ep->address == ep->code+3.
+ * Its ep will have ep->address == ep->code.
* erts_find_function() will return the non-NULL ep.
* Below we'll try to derive a code_ptr from ep->address.
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
- funcinfo = code_ptr+2;
+ cmfa = erts_code_to_codemfa(ep->addressv[code_ix]);
} else if (modp == NULL || (code_hdr = modp->curr.code_hdr) == NULL) {
BIF_RET(am_undef);
} else {
n = code_hdr->num_functions;
for (i = 0; i < n; i++) {
- code_ptr = code_hdr->functions[i];
- if (code_ptr[3] == name && code_ptr[4] == arity) {
- funcinfo = code_ptr+2;
+ cmfa = &code_hdr->functions[i]->mfa;
+ if (cmfa->function == name && cmfa->arity == arity) {
break;
}
}
@@ -307,6 +312,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_undef);
}
}
+ code_ptr = (BeamInstr*)erts_code_to_codeinfo(erts_codemfa_to_code(cmfa));
} else {
goto error;
}
@@ -315,9 +321,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
instr = (BeamInstr) code_ptr[0];
for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
- if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') {
+ if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') {
code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
- i, opc[i].sz-1, code_ptr+1) + 1;
+ i, opc[i].sz-1, code_ptr) + 1;
break;
}
}
@@ -332,9 +338,10 @@ erts_debug_disassemble_1(BIF_ALIST_1)
(void) erts_bld_uword(NULL, &hsz, (BeamInstr) code_ptr);
hp = HAlloc(p, hsz);
addr = erts_bld_uword(&hp, NULL, (BeamInstr) code_ptr);
- ASSERT(is_atom(funcinfo[0]) || funcinfo[0] == NIL);
- ASSERT(is_atom(funcinfo[1]) || funcinfo[1] == NIL);
- mfa = TUPLE3(hp, (Eterm) funcinfo[0], (Eterm) funcinfo[1], make_small((Eterm) funcinfo[2]));
+ ASSERT(is_atom(cmfa->module) || is_nil(cmfa->module));
+ ASSERT(is_atom(cmfa->function) || is_nil(cmfa->function));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
hp += 4;
return TUPLE3(hp, addr, bin, mfa);
}
@@ -346,11 +353,12 @@ dbg_bt(Process* p, Eterm* sp)
while (sp < stack) {
if (is_CP(*sp)) {
- BeamInstr* addr = find_function_from_pc(cp_val(*sp));
- if (addr)
+ ErtsCodeMFA* cmfa = find_function_from_pc(cp_val(*sp));
+ if (cmfa)
erts_fprintf(stderr,
HEXF ": %T:%T/%bpu\n",
- addr, (Eterm) addr[0], (Eterm) addr[1], addr[2]);
+ &cmfa->module, cmfa->module,
+ cmfa->function, cmfa->arity);
}
sp++;
}
@@ -359,17 +367,17 @@ dbg_bt(Process* p, Eterm* sp)
void
dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
{
- BeamInstr* f = find_function_from_pc(addr);
+ ErtsCodeMFA* cmfa = find_function_from_pc(addr);
- if (f == NULL) {
+ if (cmfa == NULL) {
erts_fprintf(stderr, "???\n");
} else {
int arity;
int i;
- addr = f;
- arity = addr[2];
- erts_fprintf(stderr, HEXF ": %T:%T(", addr, (Eterm) addr[0], (Eterm) addr[1]);
+ arity = cmfa->arity;
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr,
+ cmfa->module, cmfa->function);
for (i = 0; i < arity; i++)
erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
erts_fprintf(stderr, ")\n");
@@ -377,7 +385,7 @@ dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg)
}
static int
-print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
+print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr)
{
int i;
BeamInstr tag;
@@ -390,6 +398,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
BeamInstr args[8]; /* Arguments for this instruction. */
BeamInstr* ap; /* Pointer to arguments. */
BeamInstr* unpacked; /* Unpacked arguments */
+ BeamInstr* first_arg; /* First argument */
start_prog = opc[op].pack;
@@ -399,8 +408,14 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
* Avoid copying because instructions containing bignum operands
* are bigger than actually declared.
*/
- ap = (BeamInstr *) addr;
+ addr++;
+ ap = addr;
} else {
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ BeamInstr instr_word = addr[0];
+#endif
+ addr++;
+
/*
* Copy all arguments to a local buffer for the unpacking.
*/
@@ -416,30 +431,31 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
* the packing program backwards and in reverse.
*/
- prog = start_prog + strlen(start_prog);
+ prog = start_prog + sys_strlen(start_prog);
while (start_prog < prog) {
prog--;
switch (*prog) {
+ case 'f':
case 'g':
+ case 'q':
*ap++ = *--sp;
break;
- case 'i': /* Initialize packing accumulator. */
- *ap++ = packed;
- break;
- case 's':
- *ap++ = packed & 0x3ff;
- packed >>= 10;
+#ifdef ARCH_64
+ case '1': /* Tightest shift */
+ *ap++ = (packed & BEAM_TIGHTEST_MASK) << 3;
+ packed >>= BEAM_TIGHTEST_SHIFT;
break;
- case '0': /* Tight shift */
+#endif
+ case '2': /* Tight shift */
*ap++ = packed & BEAM_TIGHT_MASK;
packed >>= BEAM_TIGHT_SHIFT;
break;
- case '6': /* Shift 16 steps */
+ case '3': /* Loose shift */
*ap++ = packed & BEAM_LOOSE_MASK;
packed >>= BEAM_LOOSE_SHIFT;
break;
#ifdef ARCH_64
- case 'w': /* Shift 32 steps */
+ case '4': /* Shift 32 steps */
*ap++ = packed & BEAM_WIDE_MASK;
packed >>= BEAM_WIDE_SHIFT;
break;
@@ -450,13 +466,25 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
case 'P':
packed = *--sp;
break;
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ case '#': /* -1 */
+ case '$': /* -2 */
+ case '%': /* -3 */
+ case '&': /* -4 */
+ case '\'': /* -5 */
+ case '(': /* -6 */
+ packed = (packed << BEAM_WIDE_SHIFT) | BeamExtraData(instr_word);
+ break;
+#endif
default:
- ASSERT(0);
+ erts_exit(ERTS_ERROR_EXIT, "beam_debug: invalid packing op: %c\n", *prog);
}
}
ap = args;
}
+ first_arg = ap;
+
/*
* Print the name and all operands of the instructions.
*/
@@ -485,6 +513,14 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
case 'n': /* Nil */
erts_print(to, to_arg, "[]");
break;
+ case 'S': /* Register */
+ {
+ Uint reg_type = (*ap & 1) ? 'y' : 'x';
+ Uint n = ap[0] / sizeof(Eterm);
+ erts_print(to, to_arg, "%c(%d)", reg_type, n);
+ ap++;
+ break;
+ }
case 's': /* Any source (tagged constant or register) */
tag = loader_tag(*ap);
if (tag == LOADER_X_REG) {
@@ -518,61 +554,111 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
}
ap++;
break;
- case 'I': /* Untagged integer. */
- case 't':
- erts_print(to, to_arg, "%d", *ap);
- ap++;
- break;
- case 'f': /* Destination label */
- {
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
- erts_print(to, to_arg, "f(" HEXF ")", *ap);
- } else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
+ case 't': /* Untagged integers */
+ case 'I':
+ case 'W':
+ switch (op) {
+ case op_i_gc_bif1_jWstd:
+ case op_i_gc_bif2_jWtssd:
+ case op_i_gc_bif3_jWtssd:
+ {
+ const ErtsGcBif* p;
+ BifFunction gcf = (BifFunction) *ap;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->gc_bif == gcf) {
+ print_bif_name(to, to_arg, p->bif);
+ break;
+ }
+ }
+ if (p->bif == 0) {
+ erts_print(to, to_arg, "%d", (Uint)gcf);
+ }
+ break;
}
- ap++;
+ case op_i_make_fun_Wt:
+ if (*sign == 'W') {
+ ErlFunEntry* fe = (ErlFunEntry *) *ap;
+ ErtsCodeMFA* cmfa = find_function_from_pc(fe->address);
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
+ } else {
+ erts_print(to, to_arg, "%d", *ap);
+ }
+ break;
+ case op_i_bs_match_string_xfWW:
+ if (ap - first_arg < 3) {
+ erts_print(to, to_arg, "%d", *ap);
+ } else {
+ Uint bits = ap[-1];
+ Uint bytes = (bits+7)/8;
+ byte* str = (byte *) *ap;
+ print_byte_string(to, to_arg, str, bytes);
+ }
+ break;
+ case op_bs_put_string_WW:
+ if (ap - first_arg == 0) {
+ erts_print(to, to_arg, "%d", *ap);
+ } else {
+ Uint bytes = ap[-1];
+ byte* str = (byte *) ap[0];
+ print_byte_string(to, to_arg, str, bytes);
+ }
+ break;
+ default:
+ erts_print(to, to_arg, "%d", *ap);
}
+ ap++;
break;
+ case 'f': /* Destination label */
+ switch (op) {
+ case op_catch_yf:
+ erts_print(to, to_arg, "f(" HEXF ")", catch_pc((BeamInstr)*ap));
+ break;
+ default:
+ {
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ ErtsCodeMFA* cmfa = find_function_from_pc(target);
+ if (!cmfa || erts_codemfa_to_code(cmfa) != target) {
+ erts_print(to, to_arg, "f(" HEXF ")", target);
+ } else {
+ erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module,
+ cmfa->function, cmfa->arity);
+ }
+ ap++;
+ }
+ break;
+ }
+ break;
case 'p': /* Pointer (to label) */
{
- BeamInstr* f = find_function_from_pc((BeamInstr *)*ap);
- if (f+3 != (BeamInstr *) *ap) {
- erts_print(to, to_arg, "p(" HEXF ")", *ap);
- } else {
- erts_print(to, to_arg, "%T:%T/%bpu", (Eterm) f[0], (Eterm) f[1], f[2]);
- }
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ erts_print(to, to_arg, "p(" HEXF ")", target);
ap++;
}
break;
case 'j': /* Pointer (to label) */
- erts_print(to, to_arg, "j(" HEXF ")", *ap);
+ if (*ap == 0) {
+ erts_print(to, to_arg, "j(0)");
+ } else {
+ BeamInstr* target = f_to_addr(addr, op, ap);
+ erts_print(to, to_arg, "j(" HEXF ")", target);
+ }
ap++;
break;
case 'e': /* Export entry */
{
Export* ex = (Export *) *ap;
erts_print(to, to_arg,
- "%T:%T/%bpu", (Eterm) ex->code[0], (Eterm) ex->code[1], ex->code[2]);
+ "%T:%T/%bpu", (Eterm) ex->info.mfa.module,
+ (Eterm) ex->info.mfa.function,
+ ex->info.mfa.arity);
ap++;
}
break;
case 'F': /* Function definition */
break;
case 'b':
- for (i = 0; i < BIF_SIZE; i++) {
- BifFunction bif = (BifFunction) *ap;
- if (bif == bif_table[i].f) {
- break;
- }
- }
- if (i == BIF_SIZE) {
- erts_print(to, to_arg, "b(%d)", (Uint) *ap);
- } else {
- Eterm name = bif_table[i].name;
- unsigned arity = bif_table[i].arity;
- erts_print(to, to_arg, "%T/%u", name, arity);
- }
+ print_bif_name(to, to_arg, (BifFunction) *ap);
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
@@ -581,7 +667,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
ap++;
break;
case 'l': /* fr(N) */
- erts_print(to, to_arg, "fr(%d)", loader_reg_index(ap[0]));
+ erts_print(to, to_arg, "fr(%d)", ap[0] / sizeof(FloatDef));
ap++;
break;
default:
@@ -599,12 +685,22 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
unpacked = ap;
ap = addr + size;
+
+ /*
+ * In the code below, never use ap[-1], ap[-2], ...
+ * (will not work if the arguments have been packed).
+ *
+ * Instead use unpacked[-1], unpacked[-2], ...
+ */
switch (op) {
case op_i_select_val_lins_xfI:
case op_i_select_val_lins_yfI:
+ case op_i_select_val_bins_xfI:
+ case op_i_select_val_bins_yfI:
{
- int n = ap[-1];
+ int n = unpacked[-1];
int ix = n;
+ Sint32* jump_tab = (Sint32 *)(ap + n);
while (ix--) {
erts_print(to, to_arg, "%T ", (Eterm) ap[0]);
@@ -613,30 +709,19 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
}
ix = n;
while (ix--) {
- erts_print(to, to_arg, "f(" HEXF ") ", (Eterm) ap[0]);
- ap++;
- size++;
- }
- }
- break;
- case op_i_select_val_bins_xfI:
- case op_i_select_val_bins_yfI:
- {
- int n = ap[-1];
-
- while (n > 0) {
- erts_print(to, to_arg, "%T f(" HEXF ") ", (Eterm) ap[0], ap[1]);
- ap += 2;
- size += 2;
- n--;
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
+ size += (n+1) / 2;
}
break;
case op_i_select_tuple_arity_xfI:
case op_i_select_tuple_arity_yfI:
{
- int n = ap[-1];
+ int n = unpacked[-1];
int ix = n - 1; /* without sentinel */
+ Sint32* jump_tab = (Sint32 *)(ap + n);
while (ix--) {
Uint arity = arityval(ap[0]);
@@ -650,39 +735,62 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
size++;
ix = n;
while (ix--) {
- erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
+ size += (n+1) / 2;
}
break;
- case op_i_jump_on_val_xfII:
- case op_i_jump_on_val_yfII:
+ case op_i_select_val2_xfcc:
+ case op_i_select_val2_yfcc:
+ case op_i_select_tuple_arity2_xfAA:
+ case op_i_select_tuple_arity2_yfAA:
+ {
+ Sint32* jump_tab = (Sint32 *) ap;
+ BeamInstr* target;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ target = f_to_addr_packed(addr, op, jump_tab++);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ }
+ size += 1;
+ }
+ break;
+ case op_i_jump_on_val_xfIW:
+ case op_i_jump_on_val_yfIW:
{
- int n;
- for (n = ap[-2]; n > 0; n--) {
- erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ int n = unpacked[-2];
+ Sint32* jump_tab = (Sint32 *) ap;
+
+ size += (n+1) / 2;
+ while (n-- > 0) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
}
break;
case op_i_jump_on_val_zero_xfI:
case op_i_jump_on_val_zero_yfI:
{
- int n;
- for (n = ap[-1]; n > 0; n--) {
- erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ int n = unpacked[-1];
+ Sint32* jump_tab = (Sint32 *) ap;
+
+ size += (n+1) / 2;
+ while (n-- > 0) {
+ BeamInstr* target = f_to_addr_packed(addr, op, jump_tab);
+ erts_print(to, to_arg, "f(" HEXF ") ", target);
+ jump_tab++;
}
}
break;
case op_i_put_tuple_xI:
case op_i_put_tuple_yI:
- case op_new_map_dII:
- case op_update_map_assoc_jsdII:
- case op_update_map_exact_jsdII:
+ case op_new_map_dtI:
+ case op_update_map_assoc_sdtI:
+ case op_update_map_exact_jsdtI:
{
int n = unpacked[-1];
@@ -692,7 +800,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
break;
case LOADER_Y_REG:
- erts_print(to, to_arg, " x(%d)", loader_y_reg_index(ap[0]));
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
break;
default:
erts_print(to, to_arg, " %T", (Eterm) ap[0]);
@@ -702,6 +810,27 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
}
}
break;
+ case op_i_new_small_map_lit_dtq:
+ {
+ Eterm *tp = tuple_val(unpacked[-1]);
+ int n = arityval(*tp);
+
+ while (n > 0) {
+ switch (loader_tag(ap[0])) {
+ case LOADER_X_REG:
+ erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
+ break;
+ case LOADER_Y_REG:
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
+ break;
+ default:
+ erts_print(to, to_arg, " %T", (Eterm) ap[0]);
+ break;
+ }
+ ap++, size++, n--;
+ }
+ }
+ break;
case op_i_get_map_elements_fsI:
{
int n = unpacked[-1];
@@ -715,7 +844,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0]));
break;
case LOADER_Y_REG:
- erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]));
+ erts_print(to, to_arg, " y(%d)", loader_y_reg_index(ap[0]) - CP_SIZE);
break;
default:
erts_print(to, to_arg, " %T", (Eterm) ap[0]);
@@ -731,3 +860,431 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
return size;
}
+
+static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (bif == bif_table[i].f) {
+ break;
+ }
+ }
+ if (i == BIF_SIZE) {
+ erts_print(to, to_arg, "b(%d)", (Uint) bif);
+ } else {
+ Eterm name = bif_table[i].name;
+ unsigned arity = bif_table[i].arity;
+ erts_print(to, to_arg, "%T/%u", name, arity);
+ }
+}
+
+static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap)
+{
+ return base - 1 + opc[op].adjust + (Sint32) *ap;
+}
+
+static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap)
+{
+ return base - 1 + opc[op].adjust + *ap;
+}
+
+static void print_byte_string(fmtfn_t to, void *to_arg, byte* str, Uint bytes)
+{
+ Uint i;
+
+ for (i = 0; i < bytes; i++) {
+ erts_print(to, to_arg, "%02X", str[i]);
+ }
+}
+
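f_to_addr() and f_to_addr_packed() exist because 'f'/'j' operands and the select/jump tables now store signed 32-bit offsets relative to the instruction's first word instead of absolute pointers. An equivalent standalone reformulation, assuming the BeamInstr/Sint32 typedefs and the generated opc[].adjust field used above:

static BeamInstr* sketch_decode_jump(BeamInstr* instr_start, int op, Sint32 rel)
{
    /* print_op advanced addr one word past the instruction start, so
     * f_to_addr(addr, op, ap) computes addr - 1 + opc[op].adjust + *ap;
     * measured from the instruction start that is simply: */
    return instr_start + opc[op].adjust + rel;
}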
+/*
+ * Dirty BIF testing.
+ *
+ * The erts_debug:dirty_cpu/2, erts_debug:dirty_io/2, and
+ * erts_debug:dirty/3 BIFs are used by the dirty_bif_SUITE
+ * test suite.
+ */
+
+static int ms_wait(Process *c_p, Eterm etimeout, int busy);
+static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
+static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
+
+/*
+ * erts_debug:dirty_cpu/2 is statically determined to execute on
+ * a dirty CPU scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_cpu_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_cpu, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty_io/2 is statically determined to execute on
+ * a dirty I/O scheduler (see erts_dirty_bif.tab).
+ */
+BIF_RETTYPE
+erts_debug_dirty_io_2(BIF_ALIST_2)
+{
+ return dirty_test(BIF_P, am_dirty_io, BIF_ARG_1, BIF_ARG_2, BIF_I);
+}
+
+/*
+ * erts_debug:dirty/3 executes on a normal scheduler.
+ */
+BIF_RETTYPE
+erts_debug_dirty_3(BIF_ALIST_3)
+{
+ Eterm argv[2];
+ switch (BIF_ARG_1) {
+ case am_normal:
+ return dirty_test(BIF_P, am_normal, BIF_ARG_2, BIF_ARG_3, BIF_I);
+ case am_dirty_cpu:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ case am_dirty_io:
+ argv[0] = BIF_ARG_2;
+ argv[1] = BIF_ARG_3;
+ return erts_schedule_bif(BIF_P,
+ argv,
+ BIF_I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ default:
+ BIF_ERROR(BIF_P, EXC_BADARG);
+ }
+}
+
+
+static BIF_RETTYPE
+dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
+{
+ BIF_RETTYPE ret;
+ if (am_scheduler == arg1) {
+ ErtsSchedulerData *esdp;
+ if (arg2 != am_type)
+ goto badarg;
+ esdp = erts_proc_sched_data(c_p);
+ if (!esdp)
+ goto scheduler_type_error;
+
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ ERTS_BIF_PREP_RET(ret, am_normal);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ ERTS_BIF_PREP_RET(ret, am_dirty_cpu);
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ERTS_BIF_PREP_RET(ret, am_dirty_io);
+ break;
+ default:
+ scheduler_type_error:
+ ERTS_BIF_PREP_RET(ret, am_error);
+ break;
+ }
+ }
+ else if (am_error == arg1) {
+ switch (arg2) {
+ case am_notsup:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOTSUP);
+ break;
+ case am_undef:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
+ break;
+ case am_badarith:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_BADARITH);
+ break;
+ case am_noproc:
+ ERTS_BIF_PREP_ERROR(ret, c_p, EXC_NOPROC);
+ break;
+ case am_system_limit:
+ ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT);
+ break;
+ case am_badarg:
+ default:
+ goto badarg;
+ }
+ }
+ else if (am_copy == arg1) {
+ int i;
+ Eterm res;
+
+ for (res = NIL, i = 0; i < 1000; i++) {
+ Eterm *hp, sz;
+ Eterm cpy;
+ /* We do not want this to be optimized,
+ but rather the opposite... */
+ sz = size_object(arg2);
+ hp = HAlloc(c_p, sz);
+ cpy = copy_struct(arg2, sz, &hp, &c_p->off_heap);
+ hp = HAlloc(c_p, 2);
+ res = CONS(hp, cpy, res);
+ }
+
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (am_send == arg1) {
+ dirty_send_message(c_p, arg2, am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("wait", arg1)) {
+ if (!ms_wait(c_p, arg2, type == am_dirty_cpu))
+ goto badarg;
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("reschedule", arg1)) {
+ /*
+ * Reschedule operation after decrement of two until we reach
+ * zero. Switch between dirty scheduler types when 'n' is
+ * evenly divisible by 4. If the initial value wasn't evenly
+ * divisible by 2, throw a badarg exception.
+ */
+ Eterm next_type;
+ Sint n;
+ if (!term_to_Sint(arg2, &n) || n < 0)
+ goto badarg;
+ if (n == 0)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm argv[3];
+ Eterm eint = erts_make_integer((Uint) (n - 2), c_p);
+ if (n % 4 != 0)
+ next_type = type;
+ else {
+ switch (type) {
+ case am_dirty_cpu: next_type = am_dirty_io; break;
+ case am_dirty_io: next_type = am_normal; break;
+ case am_normal: next_type = am_dirty_cpu; break;
+ default: goto badarg;
+ }
+ }
+ switch (next_type) {
+ case am_dirty_io:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_io_2,
+ ERTS_SCHED_DIRTY_IO,
+ am_erts_debug,
+ am_dirty_io,
+ 2);
+ break;
+ case am_dirty_cpu:
+ argv[0] = arg1;
+ argv[1] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_cpu_2,
+ ERTS_SCHED_DIRTY_CPU,
+ am_erts_debug,
+ am_dirty_cpu,
+ 2);
+ break;
+ case am_normal:
+ argv[0] = am_normal;
+ argv[1] = arg1;
+ argv[2] = eint;
+ ret = erts_schedule_bif(c_p,
+ argv,
+ I,
+ erts_debug_dirty_3,
+ ERTS_SCHED_NORMAL,
+ am_erts_debug,
+ am_dirty,
+ 3);
+ break;
+ default:
+ goto badarg;
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ready_wait6_done", arg1)) {
+ ERTS_DECL_AM(ready);
+ ERTS_DECL_AM(done);
+ dirty_send_message(c_p, arg2, AM_ready);
+ ms_wait(c_p, make_small(6000), 0);
+ dirty_send_message(c_p, arg2, AM_done);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ }
+ else if (ERTS_IS_ATOM_STR("alive_waitexiting", arg1)) {
+ Process *real_c_p = erts_proc_shadow2real(c_p);
+ Eterm *hp, *hp2;
+ Uint sz;
+ int i;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ int dirty_io = esdp->type == ERTS_SCHED_DIRTY_IO;
+
+ if (ERTS_PROC_IS_EXITING(real_c_p))
+ goto badarg;
+ dirty_send_message(c_p, arg2, am_alive);
+
+ /* Wait until dead */
+ while (!ERTS_PROC_IS_EXITING(real_c_p)) {
+ if (dirty_io)
+ ms_wait(c_p, make_small(100), 0);
+ else
+ erts_thr_yield();
+ }
+
+ ms_wait(c_p, make_small(1000), 0);
+
+ /* Should still be able to allocate memory */
+ hp = HAlloc(c_p, 3); /* Likely on heap */
+ sz = 10000;
+ hp2 = HAlloc(c_p, sz); /* Likely in heap fragment */
+ *hp2 = make_pos_bignum_header(sz);
+ for (i = 1; i < sz; i++)
+ hp2[i] = (Eterm) 4711;
+ ERTS_BIF_PREP_RET(ret, TUPLE2(hp, am_ok, make_big(hp2)));
+ }
+ else {
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ }
+ return ret;
+}
+
+
+static int
+dirty_send_message(Process *c_p, Eterm to, Eterm tag)
+{
+ ErtsProcLocks c_p_locks, rp_locks;
+ Process *rp, *real_c_p;
+ Eterm msg, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ ASSERT(is_immed(tag));
+
+ real_c_p = erts_proc_shadow2real(c_p);
+ if (real_c_p != c_p)
+ c_p_locks = 0;
+ else
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+
+ ASSERT(real_c_p->common.id == c_p->common.id);
+
+ rp = erts_pid2proc_opt(real_c_p, c_p_locks,
+ to, 0,
+ ERTS_P2P_FLG_INC_REFC);
+
+ if (!rp)
+ return 0;
+
+ rp_locks = 0;
+ mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
+
+ msg = TUPLE2(hp, tag, c_p->common.id);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
+
+ if (rp == real_c_p)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_proc_unlock(rp, rp_locks);
+
+ erts_proc_dec_refc(rp);
+
+ return 1;
+}
+
+static int
+ms_wait(Process *c_p, Eterm etimeout, int busy)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ErtsMonotonicTime time, timeout_time;
+ Sint64 ms;
+
+ if (!term_to_Sint64(etimeout, &ms))
+ return 0;
+
+ time = erts_get_monotonic_time(esdp);
+
+ if (ms < 0)
+ timeout_time = time;
+ else
+ timeout_time = time + ERTS_MSEC_TO_MONOTONIC(ms);
+
+ while (time < timeout_time) {
+ if (busy)
+ erts_thr_yield();
+ else {
+ ErtsMonotonicTime timeout = timeout_time - time;
+
+#ifdef __WIN32__
+ Sleep((DWORD) ERTS_MONOTONIC_TO_MSEC(timeout));
+#else
+ {
+ ErtsMonotonicTime to = ERTS_MONOTONIC_TO_USEC(timeout);
+ struct timeval tv;
+
+ tv.tv_sec = (long) to / (1000*1000);
+ tv.tv_usec = (long) to % (1000*1000);
+
+ select(0, NULL, NULL, NULL, &tv);
+ }
+#endif
+ }
+
+ time = erts_get_monotonic_time(esdp);
+ }
+ return 1;
+}
+
+
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+
+/*
+ * The below functions is for testing of the stack
+ * limit functionality. They are intentionally
+ * written body recursive in order to prevent
+ * last call optimization...
+ */
+
+UWord
+erts_check_stack_recursion_downwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_below_limit(&c, limit + 1024))
+ return (char *) erts_ptr_id(start_c) - (char *) erts_ptr_id(&c);
+ res = erts_check_stack_recursion_downwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+UWord
+erts_check_stack_recursion_upwards(char *start_c)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+ UWord res;
+ if (erts_check_above_limit(&c, limit - 1024))
+ return (char *) erts_ptr_id(&c) - (char *) erts_ptr_id(start_c);
+ res = erts_check_stack_recursion_upwards(start_c);
+ erts_ptr_id(&c);
+ return res;
+}
+
+int
+erts_is_above_stack_limit(char *ptr)
+{
+ return (char *) ptr > ERTS_STACK_LIMIT;
+}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ef4cdf9d5a..e909a0b4da 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,56 +38,53 @@
#include "beam_bp.h"
#include "beam_catches.h"
#include "erl_thr_progress.h"
+#include "erl_nfunc_sched.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
#endif
#include "dtrace-wrapper.h"
+#include "erl_proc_sig_queue.h"
/* #define HARDDEBUG 1 */
#if defined(NO_JUMP_TABLE)
# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
-# define OpCode(OpCode) ((Uint*)op_##OpCode)
-# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;}
-# define LabelAddr(Addr) &&##Addr
+# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)op_##OpCode)
+# define Goto(Rel) {Go = BeamCodeAddr(Rel); goto emulator_loop;}
+# define GotoPF(Rel) Goto(Rel)
#else
# define OpCase(OpCode) lb_##OpCode
# define CountCase(OpCode) lb_count_##OpCode
-# define Goto(Rel) goto *((void *)Rel)
-# define LabelAddr(Label) &&Label
-# define OpCode(OpCode) (&&lb_##OpCode)
+# define IsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == (BeamInstr)&&lb_##OpCode)
+# define Goto(Rel) goto *((void *)BeamCodeAddr(Rel))
+# define GotoPF(Rel) goto *((void *)Rel)
+# define LabelAddr(Label) &&Label
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
-# ifdef ERTS_SMP
-# define PROCESS_MAIN_CHK_LOCKS(P) \
-do { \
- if ((P)) \
- erts_proc_lc_chk_only_proc_main((P)); \
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+# define PROCESS_MAIN_CHK_LOCKS(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_chk_only_proc_main((P)); \
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
-do { \
- if ((P)) \
- erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
- __FILE__, __LINE__); \
+# define ERTS_REQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
+ __FILE__, __LINE__); \
} while (0)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \
do { \
if ((P)) \
erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
} while (0)
-# else
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
-# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
-# endif
#else
# define PROCESS_MAIN_CHK_LOCKS(P)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+# define ERTS_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_UNREQ_PROC_MAIN_LOCK(P)
#endif
/*
@@ -112,20 +109,18 @@ do { \
# define CHECK_ARGS(T)
#endif
-#ifndef MAX
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-#endif
+#define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0)
-#define GET_BIF_MODULE(p) ((Eterm) (((Export *) p)->code[0]))
-#define GET_BIF_FUNCTION(p) ((Eterm) (((Export *) p)->code[1]))
-#define GET_BIF_ARITY(p) ((Eterm) (((Export *) p)->code[2]))
-#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+#define GET_BIF_MODULE(p) (p->info.mfa.module)
+#define GET_BIF_FUNCTION(p) (p->info.mfa.function)
+#define GET_BIF_ARITY(p) (p->info.mfa.arity)
+#define GET_BIF_ADDRESS(p) ((BifFunction) (p->beam[1]))
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
/*
 * We reuse some of the fields in the save area in the process structure.
- * This is safe to do, since this space is only activly used when
+ * This is safe to do, since this space is only actively used when
* the process is switched out.
*/
#define REDS_IN(p) ((p)->def_arg_reg[5])
@@ -139,11 +134,11 @@ do { \
/* We don't check the range if an ordinary switch is used */
#ifdef NO_JUMP_TABLE
-#define VALID_INSTR(IP) ((UWord)(IP) < (NUMBER_OF_OPCODES*2+10))
+# define VALID_INSTR(IP) (BeamCodeAddr(IP) < (NUMBER_OF_OPCODES*2+10))
#else
-#define VALID_INSTR(IP) \
- ((SWord)LabelAddr(emulator_loop) <= (SWord)(IP) && \
- (SWord)(IP) < (SWord)LabelAddr(end_emulator_loop))
+# define VALID_INSTR(IP) \
+ ((BeamInstr)LabelAddr(emulator_loop) <= BeamCodeAddr(IP) && \
+ BeamCodeAddr(IP) < (BeamInstr)LabelAddr(end_emulator_loop))
#endif /* NO_JUMP_TABLE */
#define SET_CP(p, ip) \
@@ -157,51 +152,8 @@ do { \
/*
* Register target (X or Y register).
*/
-#define REG_TARGET(Target) (*(((Target) & 1) ? &yb(Target-1) : &xb(Target)))
-
-/*
- * Store a result into a register given a destination descriptor.
- */
-
-#define StoreResult(Result, DestDesc) \
- do { \
- Eterm stb_reg; \
- stb_reg = (DestDesc); \
- CHECK_TERM(Result); \
- REG_TARGET(stb_reg) = (Result); \
- } while (0)
-
-#define StoreSimpleDest(Src, Dest) Dest = (Src)
-
-/*
- * Store a result into a register and execute the next instruction.
- * Dst points to the word with a destination descriptor, which MUST
- * be just before the next instruction.
- */
-
-#define StoreBifResult(Dst, Result) \
- do { \
- BeamInstr* stb_next; \
- Eterm stb_reg; \
- stb_reg = Arg(Dst); \
- I += (Dst) + 2; \
- stb_next = (BeamInstr *) *I; \
- CHECK_TERM(Result); \
- REG_TARGET(stb_reg) = (Result); \
- Goto(stb_next); \
- } while (0)
-
-#define ClauseFail() goto jump_f
-
-#define SAVE_CP(X) \
- do { \
- *(X) = make_cp(c_p->cp); \
- c_p->cp = 0; \
- } while(0)
-
-#define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X)))
-#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
+#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target))
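REG_TARGET_PTR packs both register banks into one untagged word: bit 0 selects the bank and the remaining bits form a byte offset, matching the xb()/yb() byte-offset macros further down and the 'S' operand printer added to beam_debug.c. The same decoding written out as a hypothetical function (assuming Eterm plus the reg and E pointers that are in scope in process_main):

static Eterm* sketch_reg_target(Eterm* reg, Eterm* E, UWord desc)
{
    if (desc & 1)  /* y register: byte offset into the stack, biased by 1 */
        return (Eterm*)((char*)E + (desc - 1));
    else           /* x register: byte offset into the X register array */
        return (Eterm*)((char*)reg + desc);
}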
/*
* Special Beam instructions.
@@ -211,14 +163,10 @@ BeamInstr beam_apply[2];
BeamInstr beam_exit[1];
BeamInstr beam_continue_exit[1];
-BeamInstr* em_call_error_handler;
-BeamInstr* em_apply_bif;
-BeamInstr* em_call_nif;
-
/* NOTE These should be the only variables containing trace instructions.
** Sometimes tests are form the instruction value, and sometimes
-** for the refering variable (one of these), and rouge references
+** for the referring variable (one of these), and rouge references
** will most likely cause chaos.
*/
BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
@@ -283,163 +231,24 @@ void** beam_ops;
HEAP_TOP((P)) = HTOP; \
(P)->stop = E; \
PROCESS_MAIN_CHK_LOCKS((P)); \
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
+ ERTS_UNREQ_PROC_MAIN_LOCK((P))
#define db(N) (N)
+#define fb(N) ((Sint)(Sint32)(N))
+#define jb(N) ((Sint)(Sint32)(N))
#define tb(N) (N)
-#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
-#define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
-#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
+#define xb(N) (*ADD_BYTE_OFFSET(reg, N))
+#define yb(N) (*ADD_BYTE_OFFSET(E, N))
+#define Sb(N) (*REG_TARGET_PTR(N))
+#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
#define Qb(N) (N)
#define Ib(N) (N)
+
#define x(N) reg[N]
#define y(N) E[N]
#define r(N) x(N)
-
-/*
- * Makes sure that there are StackNeed + HeapNeed + 1 words available
- * on the combined heap/stack segment, then allocates StackNeed + 1
- * words on the stack and saves CP.
- *
- * M is number of live registers to preserve during garbage collection
- */
-
-#define AH(StackNeed, HeapNeed, M) \
- do { \
- int needed; \
- needed = (StackNeed) + 1; \
- if (E - HTOP < (needed + (HeapNeed))) { \
- SWAPOUT; \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, needed + (HeapNeed), \
- reg, (M), FCALLS); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- SWAPIN; \
- } \
- E -= needed; \
- SAVE_CP(E); \
- } while (0)
-
-#define Allocate(Ns, Live) AH(Ns, 0, Live)
-
-#define AllocateZero(Ns, Live) \
- do { Eterm* ptr; \
- int i = (Ns); \
- AH(i, 0, Live); \
- for (ptr = E + i; ptr > E; ptr--) { \
- make_blank(*ptr); \
- } \
- } while (0)
-
-#define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
-
-#define AllocateHeapZero(Ns, Nh, Live) \
- do { Eterm* ptr; \
- int i = (Ns); \
- AH(i, Nh, Live); \
- for (ptr = E + i; ptr > E; ptr--) { \
- make_blank(*ptr); \
- } \
- } while (0)
-
-#define AllocateInit(Ns, Live, Y) \
- do { AH(Ns, 0, Live); make_blank(Y); } while (0)
-
-/*
- * Like the AH macro, but allocates no additional heap space.
- */
-
-#define A(StackNeed, M) AH(StackNeed, 0, M)
-
-#define D(N) \
- RESTORE_CP(E); \
- E += (N) + 1;
-
-
-
-#define TestBinVHeap(VNh, Nh, Live) \
- do { \
- unsigned need = (Nh); \
- if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
- SWAPOUT; \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-
-
-/*
- * Check if Nh words of heap are available; if not, do a garbage collection.
- * Live is number of active argument registers to be preserved.
- */
-
-#define TestHeap(Nh, Live) \
- do { \
- unsigned need = (Nh); \
- if (E - HTOP < need) { \
- SWAPOUT; \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-/*
- * Check if Nh words of heap are available; if not, do a garbage collection.
- * Live is number of active argument registers to be preserved.
- * Takes special care to preserve Extra if a garbage collection occurs.
- */
-
-#define TestHeapPreserve(Nh, Live, Extra) \
- do { \
- unsigned need = (Nh); \
- if (E - HTOP < need) { \
- SWAPOUT; \
- reg[Live] = Extra; \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)+1, FCALLS); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
- PROCESS_MAIN_CHK_LOCKS(c_p); \
- Extra = reg[Live]; \
- SWAPIN; \
- } \
- HEAP_SPACE_VERIFIED(need); \
- } while (0)
-
-#define TestHeapPutList(Need, Reg) \
- do { \
- TestHeap((Need), 1); \
- PutList(Reg, r(0), r(0), StoreSimpleDest); \
- CHECK_TERM(r(0)); \
- } while (0)
-
-#define Init(N) make_blank(yb(N))
-
-#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
-#define Init3(Y1, Y2, Y3) \
- do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
-
-#define MakeFun(FunP, NumFree) \
- do { \
- HEAVY_SWAPOUT; \
- r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
- HEAVY_SWAPIN; \
- } while (0)
-
-#define PutTuple(Dst, Arity) \
- do { \
- Dst = make_tuple(HTOP); \
- pt_arity = (Arity); \
- } while (0)
+#define Q(N) (N*sizeof(Eterm *))
+#define l(N) (freg[N].fd)
/*
* Check that we haven't used the reductions and jump to function pointed to by
@@ -448,8 +257,8 @@ void** beam_ops;
#define DispatchMacro() \
do { \
- BeamInstr* dis_next; \
- dis_next = (BeamInstr *) *I; \
+ BeamInstr dis_next; \
+ dis_next = *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -457,12 +266,12 @@ void** beam_ops;
} else { \
goto context_switch; \
} \
- } while (0)
+ } while (0) \
#define DispatchMacroFun() \
do { \
- BeamInstr* dis_next; \
- dis_next = (BeamInstr *) *I; \
+ BeamInstr dis_next; \
+ dis_next = *I; \
CHECK_ARGS(I); \
if (FCALLS > 0 || FCALLS > neg_o_reds) { \
FCALLS--; \
@@ -472,23 +281,23 @@ void** beam_ops;
} \
} while (0)
-#define DispatchMacrox() \
- do { \
- if (FCALLS > 0) { \
- Eterm* dis_next; \
- SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
- dis_next = (Eterm *) *I; \
- FCALLS--; \
- CHECK_ARGS(I); \
- Goto(dis_next); \
- } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
- && FCALLS > neg_o_reds) { \
- goto save_calls1; \
- } else { \
- SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
- CHECK_ARGS(I); \
- goto context_switch; \
- } \
+#define DispatchMacrox() \
+ do { \
+ if (FCALLS > 0) { \
+ BeamInstr dis_next; \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
+ dis_next = *I; \
+ FCALLS--; \
+ CHECK_ARGS(I); \
+ Goto(dis_next); \
+ } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
+ && FCALLS > neg_o_reds) { \
+ goto save_calls1; \
+ } else { \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
+ CHECK_ARGS(I); \
+ goto context_switch; \
+ } \
} while (0)
#ifdef DEBUG
@@ -507,20 +316,7 @@ void** beam_ops;
# define Dispatchfun() DispatchMacroFun()
#endif
-#define Self(R) R = c_p->common.id
-#define Node(R) R = erts_this_node->sysname
-
#define Arg(N) I[(N)+1]
-#define Next(N) \
- I += (N) + 1; \
- ASSERT(VALID_INSTR(*I)); \
- Goto(*I)
-
-#define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0)
-#define NextPF(N, Dst) \
- I += N + 1; \
- ASSERT(VALID_INSTR(Dst)); \
- Goto(Dst)
#define GetR(pos, tr) \
do { \
@@ -537,496 +333,42 @@ void** beam_ops;
CHECK_TERM(tr); \
} while (0)
-#define GetArg1(N, Dst) GetR((N), Dst)
-
-#define GetArg2(N, Dst1, Dst2) \
- do { \
- GetR(N, Dst1); \
- GetR((N)+1, Dst2); \
- } while (0)
-
-#define PutList(H, T, Dst, Store) \
- do { \
- HTOP[0] = (H); HTOP[1] = (T); \
- Store(make_list(HTOP), Dst); \
- HTOP += 2; \
- } while (0)
-
-#define Swap(R1, R2) \
- do { \
- Eterm V = R1; \
- R1 = R2; \
- R2 = V; \
- } while (0)
-
-#define SwapTemp(R1, R2, Tmp) \
- do { \
- Eterm V = R1; \
- R1 = R2; \
- R2 = Tmp = V; \
- } while (0)
-
-#define Move(Src, Dst, Store) \
- do { \
- Eterm term = (Src); \
- Store(term, Dst); \
- } while (0)
-
-#define Move2Par(S1, D1, S2, D2) \
- do { \
- Eterm V1, V2; \
- V1 = (S1); V2 = (S2); D1 = V1; D2 = V2; \
- } while (0)
-
-#define MoveShift(Src, SD, D) \
- do { \
- Eterm V; \
- V = Src; D = SD; SD = V; \
- } while (0)
-
-#define MoveDup(Src, D1, D2) \
- do { \
- D1 = D2 = (Src); \
- } while (0)
-
-#define Move3(S1, D1, S2, D2, S3, D3) D1 = (S1); D2 = (S2); D3 = (S3)
-
-#define MoveWindow3(S1, S2, S3, D) \
- do { \
- Eterm xt0, xt1, xt2; \
- Eterm *y = &D; \
- xt0 = S1; \
- xt1 = S2; \
- xt2 = S3; \
- y[0] = xt0; \
- y[1] = xt1; \
- y[2] = xt2; \
- } while (0)
-
-#define MoveWindow4(S1, S2, S3, S4, D) \
- do { \
- Eterm xt0, xt1, xt2, xt3; \
- Eterm *y = &D; \
- xt0 = S1; \
- xt1 = S2; \
- xt2 = S3; \
- xt3 = S4; \
- y[0] = xt0; \
- y[1] = xt1; \
- y[2] = xt2; \
- y[3] = xt3; \
- } while (0)
-
-#define MoveWindow5(S1, S2, S3, S4, S5, D) \
- do { \
- Eterm xt0, xt1, xt2, xt3, xt4; \
- Eterm *y = &D; \
- xt0 = S1; \
- xt1 = S2; \
- xt2 = S3; \
- xt3 = S4; \
- xt4 = S5; \
- y[0] = xt0; \
- y[1] = xt1; \
- y[2] = xt2; \
- y[3] = xt3; \
- y[4] = xt4; \
- } while (0)
-
-#define MoveReturn(Src) \
- x(0) = (Src); \
- I = c_p->cp; \
- ASSERT(VALID_INSTR(*c_p->cp)); \
- c_p->cp = 0; \
- CHECK_TERM(r(0)); \
- Goto(*I)
-
-#define DeallocateReturn(Deallocate) \
- do { \
- int words_to_pop = (Deallocate); \
- SET_I((BeamInstr *) cp_val(*E)); \
- E = ADD_BYTE_OFFSET(E, words_to_pop); \
- CHECK_TERM(r(0)); \
- Goto(*I); \
- } while (0)
-
-#define MoveDeallocateReturn(Src, Deallocate) \
- x(0) = (Src); \
- DeallocateReturn(Deallocate)
-
-#define MoveCall(Src, CallDest, Size) \
- x(0) = (Src); \
- SET_CP(c_p, I+Size+1); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveCallLast(Src, CallDest, Deallocate) \
- x(0) = (Src); \
- RESTORE_CP(E); \
- E = ADD_BYTE_OFFSET(E, (Deallocate)); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveCallOnly(Src, CallDest) \
- x(0) = (Src); \
- SET_I((BeamInstr *) CallDest); \
- Dispatch();
-
-#define MoveJump(Src) \
- r(0) = (Src); \
- SET_I((BeamInstr *) Arg(0)); \
- Goto(*I);
-
-#define GetList(Src, H, T) \
- do { \
- Eterm* tmp_ptr = list_val(Src); \
- Eterm hd, tl; \
- hd = CAR(tmp_ptr); \
- tl = CDR(tmp_ptr); \
- H = hd; T = tl; \
- } while (0)
-
-#define GetTupleElement(Src, Element, Dest) \
- do { \
- Eterm* src; \
- src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \
- (Dest) = *src; \
- } while (0)
-
-#define GetTupleElement2(Src, Element, Dest) \
- do { \
- Eterm* src; \
- Eterm* dst; \
- Eterm E1, E2; \
- src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \
- dst = &(Dest); \
- E1 = src[0]; \
- E2 = src[1]; \
- dst[0] = E1; \
- dst[1] = E2; \
- } while (0)
-
-#define GetTupleElement2Y(Src, Element, D1, D2) \
- do { \
- Eterm* src; \
- Eterm E1, E2; \
- src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \
- E1 = src[0]; \
- E2 = src[1]; \
- D1 = E1; \
- D2 = E2; \
- } while (0)
-
-#define GetTupleElement3(Src, Element, Dest) \
- do { \
- Eterm* src; \
- Eterm* dst; \
- Eterm E1, E2, E3; \
- src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \
- dst = &(Dest); \
- E1 = src[0]; \
- E2 = src[1]; \
- E3 = src[2]; \
- dst[0] = E1; \
- dst[1] = E2; \
- dst[2] = E3; \
- } while (0)
-
-#define EqualImmed(X, Y, Action) if (X != Y) { Action; }
-#define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
-#define EqualExact(X, Y, Action) if (!EQ(X,Y)) { Action; }
-#define NotEqualExact(X, Y, Action) if (EQ(X,Y)) { Action; }
-#define Equal(X, Y, Action) CMP_EQ_ACTION(X,Y,Action)
-#define NotEqual(X, Y, Action) CMP_NE_ACTION(X,Y,Action)
-#define IsLessThan(X, Y, Action) CMP_LT_ACTION(X,Y,Action)
-#define IsGreaterEqual(X, Y, Action) CMP_GE_ACTION(X,Y,Action)
-
-#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
-
-#define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
-
-#define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
-
-#define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
-
-#define IsIntegerAllocate(Src, Need, Alive, Fail) \
- if (is_not_integer(Src)) { Fail; } \
- A(Need, Alive)
-
-#define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
-
-#define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
-
-#define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
-
-#define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
- if (is_not_list(Src)) { Fail; } \
- A(Need, Alive)
-
-#define IsNonemptyListTestHeap(Need, Alive, Fail) \
- if (is_not_list(x(0))) { Fail; } \
- TestHeap(Need, Alive)
-
-#define IsNonemptyListGetList(Src, H, T, Fail) \
- if (is_not_list(Src)) { \
- Fail; \
- } else { \
- Eterm* tmp_ptr = list_val(Src); \
- Eterm hd, tl; \
- hd = CAR(tmp_ptr); \
- tl = CDR(tmp_ptr); \
- H = hd; T = tl; \
- }
-
-#define IsTuple(X, Action) if (is_not_tuple(X)) Action
-
-#define IsArity(Pointer, Arity, Fail) \
- if (*tuple_val(Pointer) != (Arity)) { \
- Fail; \
- }
-
-#define IsMap(Src, Fail) if (!is_map(Src)) { Fail; }
-
-#define GetMapElement(Src, Key, Dst, Fail) \
- do { \
- Eterm _res = get_map_element(Src, Key); \
- if (is_non_value(_res)) { \
- Fail; \
- } \
- Dst = _res; \
- } while (0)
-
-#define GetMapElementHash(Src, Key, Hx, Dst, Fail) \
- do { \
- Eterm _res = get_map_element_hash(Src, Key, Hx); \
- if (is_non_value(_res)) { \
- Fail; \
- } \
- Dst = _res; \
- } while (0)
-
-#define IsFunction(X, Action) \
- do { \
- if ( !(is_any_fun(X)) ) { \
- Action; \
- } \
- } while (0)
+#define PUT_TERM_REG(term, desc) \
+do { \
+ switch (loader_tag(desc)) { \
+ case LOADER_X_REG: \
+ x(loader_x_reg_index(desc)) = (term); \
+ break; \
+ case LOADER_Y_REG: \
+ y(loader_y_reg_index(desc)) = (term); \
+ break; \
+ default: \
+ ASSERT(0); \
+ break; \
+ } \
+} while(0)
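The PUT_TERM_REG macro introduced here stores a term through a loader-tagged destination descriptor: the tag selects the X or Y register bank and the index selects the slot. A minimal standalone sketch of the idea, with mock tag encodings (the real LOADER_* encodings live in the loader headers and are not reproduced here):

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long Eterm;

    /* Mock descriptor encoding: low 2 bits select the register bank,
     * the remaining bits hold the index. Illustrative values only. */
    enum { MOCK_X_REG = 1, MOCK_Y_REG = 2 };
    #define mock_tag(d)   ((d) & 3u)
    #define mock_index(d) ((d) >> 2)

    static Eterm xregs[8], yregs[8];

    /* Same shape as PUT_TERM_REG: dispatch on the descriptor's tag. */
    static void put_term_reg(Eterm term, Eterm desc)
    {
        switch (mock_tag(desc)) {
        case MOCK_X_REG: xregs[mock_index(desc)] = term; break;
        case MOCK_Y_REG: yregs[mock_index(desc)] = term; break;
        default: assert(!"bad destination descriptor");
        }
    }

    int main(void)
    {
        put_term_reg(42, (3u << 2) | MOCK_X_REG);   /* store into x(3) */
        printf("x(3) = %lu\n", xregs[3]);
        return 0;
    }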
-#define IsFunction2(F, A, Action) \
- do { \
- if (erl_is_function(c_p, F, A) != am_true ) { \
- Action; \
- } \
- } while (0)
+#define DispatchReturn \
+do { \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(*I); \
+ } \
+ else { \
+ c_p->current = NULL; \
+ c_p->arity = 1; \
+ goto context_switch3; \
+ } \
+} while (0)
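DispatchReturn continues executing only while the reduction budget allows it; otherwise it records an arity of 1 (the return value in x(0)) and context-switches. The guard is the same double bound used by send and loop_rec_end below: FCALLS may already be negative while call saving is active, in which case neg_o_reds is the real floor. A sketch of just that test, with illustrative names:

    /* Illustrative only: yield when both reduction bounds are exhausted.
     * fcalls may legitimately be negative while call saving is active;
     * neg_o_reds is then the effective lower bound. */
    static int should_yield(long fcalls, long neg_o_reds)
    {
        return !(fcalls > 0 || fcalls > neg_o_reds);
    }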
#ifdef DEBUG
-#define IsTupleOfArity(Src, Arityval, Fail) \
- do { \
- if (!(is_tuple(Src) && *tuple_val(Src) == Arityval)) { \
- Fail; \
- } \
- } while (0)
+/* Better static type testing by the C compiler */
+# define BEAM_IS_TUPLE(Src) is_tuple(Src)
#else
-#define IsTupleOfArity(Src, Arityval, Fail) \
- do { \
- if (!(is_boxed(Src) && *tuple_val(Src) == Arityval)) { \
- Fail; \
- } \
- } while (0)
+/* Better performance */
+# define BEAM_IS_TUPLE(Src) is_boxed(Src)
#endif
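The removed IsTupleOfArity above shows how the two BEAM_IS_TUPLE variants are meant to be combined with an arity test: in debug builds is_tuple() catches type errors early, while in release builds the cheaper is_boxed() suffices because the arity-word comparison rejects every other boxed term. A reconstruction in terms of the new macro (a sketch mirroring the removed code, not necessarily the form used after this refactoring):

    #define IsTupleOfArity(Src, Arityval, Fail)          \
        do {                                             \
            if (!(BEAM_IS_TUPLE(Src) &&                  \
                  *tuple_val(Src) == (Arityval))) {      \
                Fail;                                    \
            }                                            \
        } while (0)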
-#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
-
-#define IsBinary(Src, Fail) \
- if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
-
-#define IsBitstring(Src, Fail) \
- if (is_not_binary(Src)) { Fail; }
-
-#if defined(ARCH_64)
-#define BsSafeMul(A, B, Fail, Target) \
- do { Uint64 _res = (A) * (B); \
- if (_res / B != A) { Fail; } \
- Target = _res; \
- } while (0)
-#else
-#define BsSafeMul(A, B, Fail, Target) \
- do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
- if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
- Target = _res; \
- } while (0)
-#endif
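BsSafeMul guards the bit-size multiplication against overflow in two architecture-specific ways: on 64-bit platforms it multiplies and divides back, on 32-bit platforms it widens to Uint64 and checks the high half. On GCC and Clang the same check can be written portably with __builtin_mul_overflow; a sketch of that alternative (not the emulator's code):

    #include <stddef.h>

    /* Returns 0 and stores a*b in *res, or -1 on overflow. */
    static int safe_mul(size_t a, size_t b, size_t *res)
    {
    #if defined(__GNUC__) || defined(__clang__)
        return __builtin_mul_overflow(a, b, res) ? -1 : 0;
    #else
        if (b != 0 && a > (size_t)-1 / b)
            return -1;
        *res = a * b;
        return 0;
    #endif
    }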
-
-#define BsGetFieldSize(Bits, Unit, Fail, Target) \
- do { \
- Sint _signed_size; Uint _uint_size; \
- Uint temp_bits; \
- if (is_small(Bits)) { \
- _signed_size = signed_val(Bits); \
- if (_signed_size < 0) { Fail; } \
- _uint_size = (Uint) _signed_size; \
- } else { \
- if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
- _uint_size = temp_bits; \
- } \
- BsSafeMul(_uint_size, Unit, Fail, Target); \
- } while (0)
-
-#define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
- do { \
- Sint _signed_size; Uint _uint_size; \
- Uint temp_bits; \
- if (is_small(Bits)) { \
- _signed_size = signed_val(Bits); \
- if (_signed_size < 0) { Fail; } \
- _uint_size = (Uint) _signed_size; \
- } else { \
- if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
- _uint_size = (Uint) temp_bits; \
- } \
- Target = _uint_size * Unit; \
- } while (0)
-
-#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; Sint _size; \
- if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
- _size *= ((Flags) >> 3); \
- TestHeap(FLOAT_SIZE_OBJECT, Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; \
- TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; Uint _size; \
- BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
- TestHeap(ERL_SUB_BIN_SIZE, Live); \
- _mb = ms_matchbuffer(Ms); \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- if (is_non_value(_result)) { Fail; } \
- else { Store(_result, Dst); } \
- } while (0)
-
-#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- Eterm _result; \
- TestHeap(ERL_SUB_BIN_SIZE, Live); \
- _mb = ms_matchbuffer(Ms); \
- if (((_mb->size - _mb->offset) % Unit) == 0) { \
- LIGHT_SWAPOUT; \
- _result = erts_bs_get_binary_all_2(c_p, _mb); \
- LIGHT_SWAPIN; \
- HEAP_SPACE_VERIFIED(0); \
- ASSERT(is_value(_result)); \
- Store(_result, Dst); \
- } else { \
- HEAP_SPACE_VERIFIED(0); \
- Fail; } \
- } while (0)
-
-#define BsSkipBits2(Ms, Bits, Unit, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- size_t new_offset; \
- Uint _size; \
- _mb = ms_matchbuffer(Ms); \
- BsGetFieldSize(Bits, Unit, Fail, _size); \
- new_offset = _mb->offset + _size; \
- if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
- else { Fail; } \
- } while (0)
-
-#define BsSkipBitsAll2(Ms, Unit, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- _mb = ms_matchbuffer(Ms); \
- if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
- else { Fail; } \
- } while (0)
-
-#define BsSkipBitsImm2(Ms, Bits, Fail) \
- do { \
- ErlBinMatchBuffer *_mb; \
- size_t new_offset; \
- _mb = ms_matchbuffer(Ms); \
- new_offset = _mb->offset + (Bits); \
- if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
- else { Fail; } \
- } while (0)
-
-#define NewBsPutIntegerImm(Sz, Flags, Src) \
- do { \
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutInteger(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
- { goto badarg; } \
- } while (0)
-
-#define NewBsPutFloatImm(Sz, Flags, Src) \
- do { \
- if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutFloat(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinary(Sz, Flags, Src) \
- do { \
- Sint _size; \
- BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
- if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinaryImm(Sz, Src) \
- do { \
- if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
- } while (0)
-
-#define NewBsPutBinaryAll(Src, Unit) \
- do { \
- if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
- } while (0)
-
-
-#define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
-#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
-#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
-
/*
* process_main() is already huge, so we want to avoid inlining
* into it. Especially functions that are seldom used.
@@ -1042,25 +384,31 @@ void** beam_ops;
* The following functions are called directly by process_main().
* Don't inline them.
*/
-static BifFunction translate_gc_bif(void* gcf) NOINLINE;
+static void init_emulator_finish(void) NOINLINE;
+static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE;
+static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE;
static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
- Eterm* reg, BifFunction bf) NOINLINE;
-static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
+ Eterm* reg, ErtsCodeMFA* bif_mfa) NOINLINE;
+static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
-static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
+static BeamInstr* apply(Process* p, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
Eterm args, Eterm* reg) NOINLINE;
static Eterm new_fun(Process* p, Eterm* reg,
ErlFunEntry* fe, int num_free) NOINLINE;
-static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE;
-static Eterm update_map_assoc(Process* p, Eterm* reg,
- Eterm map, BeamInstr* I) NOINLINE;
-static Eterm update_map_exact(Process* p, Eterm* reg,
- Eterm map, BeamInstr* I) NOINLINE;
+static Eterm erts_gc_new_map(Process* p, Eterm* reg, Uint live,
+ Uint n, BeamInstr* ptr) NOINLINE;
+static Eterm erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
+ Uint live, BeamInstr* ptr) NOINLINE;
+static Eterm erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
+ Uint n, BeamInstr* new_p) NOINLINE;
+static Eterm erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live,
+ Uint n, Eterm* new_p) NOINLINE;
static Eterm get_map_element(Eterm map, Eterm key);
static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx);
@@ -1071,14 +419,14 @@ static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
- BifFunction bf, Eterm args);
+ ErtsCodeMFA *bif_mfa, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
void
init_emulator(void)
{
- process_main();
+ process_main(0, 0);
}
/*
@@ -1092,6 +440,12 @@ init_emulator(void)
# define REG_stop asm("%l3")
# define REG_I asm("%l4")
# define REG_fcalls asm("%l5")
+#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG)
+# define REG_xregs asm("%r12")
+# define REG_htop
+# define REG_stop asm("%r13")
+# define REG_I asm("%rbx")
+# define REG_fcalls asm("%r14")
#else
# define REG_xregs
# define REG_htop
@@ -1106,98 +460,91 @@ init_emulator(void)
#ifdef USE_VM_CALL_PROBES
-#define DTRACE_LOCAL_CALL(p, m, f, a) \
+#define DTRACE_LOCAL_CALL(p, mfa) \
if (DTRACE_ENABLED(local_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(local_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(local_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_GLOBAL_CALL(p, m, f, a) \
+#define DTRACE_GLOBAL_CALL(p, mfa) \
if (DTRACE_ENABLED(global_function_entry)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(global_function_entry, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(global_function_entry, process_name, mfa_buf, depth); \
}
-#define DTRACE_RETURN(p, m, f, a) \
+#define DTRACE_RETURN(p, mfa) \
if (DTRACE_ENABLED(function_return)) { \
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
int depth = STACK_START(p) - STACK_TOP(p); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE3(function_return, process_name, mfa, depth); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE3(function_return, process_name, mfa_buf, depth); \
}
-#define DTRACE_BIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(bif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_entry, process_name, mfa); \
+#define DTRACE_BIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(bif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_entry, process_name, mfa_buf); \
}
-#define DTRACE_BIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(bif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(bif_return, process_name, mfa); \
+#define DTRACE_BIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(bif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(bif_return, process_name, mfa_buf); \
}
-#define DTRACE_NIF_ENTRY(p, m, f, a) \
- if (DTRACE_ENABLED(nif_entry)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_entry, process_name, mfa); \
+#define DTRACE_NIF_ENTRY(p, mfa) \
+ if (DTRACE_ENABLED(nif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_entry, process_name, mfa_buf); \
}
-#define DTRACE_NIF_RETURN(p, m, f, a) \
- if (DTRACE_ENABLED(nif_return)) { \
- DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
- dtrace_fun_decode(p, m, f, a, \
- process_name, mfa); \
- DTRACE2(nif_return, process_name, mfa); \
+#define DTRACE_NIF_RETURN(p, mfa) \
+ if (DTRACE_ENABLED(nif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, mfa, process_name, mfa_buf); \
+ DTRACE2(nif_return, process_name, mfa_buf); \
}
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p,e) \
do { \
if (DTRACE_ENABLED(global_function_entry)) { \
BeamInstr* fp = (BeamInstr *) (((Export *) (e))->addressv[erts_active_code_ix()]); \
- DTRACE_GLOBAL_CALL((p), (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); \
+ DTRACE_GLOBAL_CALL((p), erts_code_to_codemfa(fp)); \
} \
} while(0)
#define DTRACE_RETURN_FROM_PC(p) \
do { \
- BeamInstr* fp; \
- if (DTRACE_ENABLED(function_return) && (fp = find_function_from_pc((p)->cp))) { \
- DTRACE_RETURN((p), (Eterm)fp[0], (Eterm)fp[1], (Uint)fp[2]); \
+ ErtsCodeMFA* cmfa; \
+ if (DTRACE_ENABLED(function_return) && (cmfa = find_function_from_pc((p)->cp))) { \
+ DTRACE_RETURN((p), cmfa); \
} \
} while(0)
#else /* USE_VM_PROBES */
-#define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
-#define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
+#define DTRACE_LOCAL_CALL(p, mfa) do {} while (0)
+#define DTRACE_GLOBAL_CALL(p, mfa) do {} while (0)
#define DTRACE_GLOBAL_CALL_FROM_EXPORT(p, e) do {} while (0)
-#define DTRACE_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_RETURN(p, mfa) do {} while (0)
#define DTRACE_RETURN_FROM_PC(p) do {} while (0)
-#define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
-#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_BIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_BIF_RETURN(p, mfa) do {} while (0)
+#define DTRACE_NIF_ENTRY(p, mfa) do {} while (0)
+#define DTRACE_NIF_RETURN(p, mfa) do {} while (0)
#endif /* USE_VM_PROBES */
#ifdef DEBUG
@@ -1218,6 +565,13 @@ init_emulator(void)
#define ERTS_DBG_CHK_REDS(P, FC)
#endif
+#ifdef NO_FPE_SIGNALS
+# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
+# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
+#else
+# define ERTS_NO_FPE_CHECK_INIT(p)
+# define ERTS_NO_FPE_ERROR(p, a, b)
+#endif
/*
* process_main() is called twice:
@@ -1225,7 +579,8 @@ init_emulator(void)
* the instructions' C labels to the loader.
* The second call starts execution of BEAM code. This call never returns.
*/
-void process_main(void)
+ERTS_NO_RETPOLINE
+void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
{
static int init_done = 0;
Process* c_p = NULL;
@@ -1237,7 +592,7 @@ void process_main(void)
/* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
* in all other cases x0 is used.
*/
- register Eterm* reg REG_xregs = NULL;
+ register Eterm* reg REG_xregs = x_reg_array;
/*
* Top of heap (next free location); grows upwards.
@@ -1264,7 +619,7 @@ void process_main(void)
* X registers and floating point registers are located in
* scheduler specific data.
*/
- register FloatDef *freg;
+ register FloatDef *freg = f_reg_array;
/*
* For keeping the negative old value of 'reds' when call saving is active.
@@ -1277,12 +632,10 @@ void process_main(void)
#ifndef NO_JUMP_TABLE
static void* opcodes[] = { DEFINE_OPCODES };
#else
- int Go;
+ register BeamInstr Go;
#endif
#endif
- Eterm pt_arity; /* Used by do_put_tuple */
-
Uint64 start_time = 0; /* Monitor long schedule */
BeamInstr* start_time_i = NULL;
@@ -1299,7 +652,7 @@ void process_main(void)
* Note: c_p->arity must be set to reflect the number of useful terms in
* c_p->arg_reg before calling the scheduler.
*/
- if (!init_done) {
+ if (ERTS_UNLIKELY(!init_done)) {
/* This should only be reached during the init phase when only the main
* process is running. I.e. there is no race for init_done.
*/
@@ -1313,6 +666,7 @@ void process_main(void)
goto do_schedule1;
do_schedule:
+ ASSERT(c_p->arity < 6);
ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
reds_used = REDS_IN(c_p) - FCALLS;
@@ -1324,23 +678,23 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
- BeamInstr *inptr = find_function_from_pc(start_time_i);
- BeamInstr *outptr = find_function_from_pc(c_p->i);
+ ErtsCodeMFA *inptr = find_function_from_pc(start_time_i);
+ ErtsCodeMFA *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
}
}
PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
c_p = erts_schedule(NULL, c_p, reds_used);
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
start_time = 0;
#ifdef DEBUG
- pid = c_p->common.id; /* Save for debugging purpouses */
+ pid = c_p->common.id; /* Save for debugging purposes */
#endif
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_MSACC_UPDATE_CACHE_X();
@@ -1350,13 +704,11 @@ void process_main(void)
start_time_i = c_p->i;
}
- reg = erts_proc_sched_data(c_p)->x_reg_array;
- freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
Eterm* argp;
- BeamInstr *next;
+ BeamInstr next;
int i;
argp = c_p->arg_reg;
@@ -1388,7 +740,7 @@ void process_main(void)
ERTS_DBG_CHK_REDS(c_p, FCALLS);
- next = (BeamInstr *) *I;
+ next = *I;
SWAPIN;
ASSERT(VALID_INSTR(next));
@@ -1399,12 +751,11 @@ void process_main(void)
dtrace_proc_str(c_p, process_buf);
if (ERTS_PROC_IS_EXITING(c_p)) {
- strcpy(fun_buf, "<exiting>");
+ sys_strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa,
NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
@@ -1425,1915 +776,8 @@ void process_main(void)
#ifdef NO_JUMP_TABLE
switch (Go) {
#endif
-#include "beam_hot.h"
-
- {
- Eterm increment_reg_val;
- Eterm increment_val;
- Uint live;
- Eterm result;
-
- OpCase(i_increment_rIId):
- increment_reg_val = x(0);
- I--;
- goto do_increment;
-
- OpCase(i_increment_xIId):
- increment_reg_val = xb(Arg(0));
- goto do_increment;
-
- OpCase(i_increment_yIId):
- increment_reg_val = yb(Arg(0));
- goto do_increment;
-
- do_increment:
- increment_val = Arg(1);
- if (is_small(increment_reg_val)) {
- Sint i = signed_val(increment_reg_val) + increment_val;
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- StoreBifResult(3, result);
- }
- }
-
- live = Arg(2);
- HEAVY_SWAPOUT;
- reg[live] = increment_reg_val;
- reg[live+1] = make_small(increment_val);
- result = erts_gc_mixed_plus(c_p, reg, live);
- HEAVY_SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- if (is_value(result)) {
- StoreBifResult(3, result);
- }
- ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
- goto find_func_info;
- }
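The fast path above adds the untagged values and re-tags only if the sum still fits in a small; everything else falls through to the allocating erts_gc_mixed_plus. The fits-check is just a pair of comparisons; a sketch assuming the usual 4 tag bits (illustrative, not the emulator's MY_IS_SSMALL):

    typedef long Sint;                       /* stand-in for the real type */
    #define TAG_BITS   4                     /* illustrative */
    #define SMALL_BITS (8 * (int)sizeof(Sint) - TAG_BITS)

    /* Does v fit in a signed SMALL_BITS-bit payload? */
    static int is_ssmall(Sint v)
    {
        return v >= -(1L << (SMALL_BITS - 1)) &&
               v <   (1L << (SMALL_BITS - 1));
    }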
-
-#define DO_OUTLINED_ARITH_2(name, Op1, Op2) \
- do { \
- Eterm result; \
- Uint live = Arg(1); \
- \
- HEAVY_SWAPOUT; \
- reg[live] = Op1; \
- reg[live+1] = Op2; \
- result = erts_gc_##name(c_p, reg, live); \
- HEAVY_SWAPIN; \
- ERTS_HOLE_CHECK(c_p); \
- if (is_value(result)) { \
- StoreBifResult(4, result); \
- } \
- goto lb_Cl_error; \
- } while (0)
-
- {
- Eterm PlusOp1, PlusOp2;
- Eterm result;
-
- OpCase(i_plus_jIxxd):
- PlusOp1 = xb(Arg(2));
- PlusOp2 = xb(Arg(3));
- goto do_plus;
-
- OpCase(i_plus_jIxyd):
- PlusOp1 = xb(Arg(2));
- PlusOp2 = yb(Arg(3));
- goto do_plus;
-
- OpCase(i_plus_jIssd):
- GetArg2(2, PlusOp1, PlusOp2);
- goto do_plus;
-
- do_plus:
- if (is_both_small(PlusOp1, PlusOp2)) {
- Sint i = signed_val(PlusOp1) + signed_val(PlusOp2);
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- StoreBifResult(4, result);
- }
- }
- DO_OUTLINED_ARITH_2(mixed_plus, PlusOp1, PlusOp2);
- }
-
- {
- Eterm MinusOp1, MinusOp2;
- Eterm result;
-
- OpCase(i_minus_jIxxd):
- MinusOp1 = xb(Arg(2));
- MinusOp2 = xb(Arg(3));
- goto do_minus;
-
- OpCase(i_minus_jIssd):
- GetArg2(2, MinusOp1, MinusOp2);
- goto do_minus;
-
- do_minus:
- if (is_both_small(MinusOp1, MinusOp2)) {
- Sint i = signed_val(MinusOp1) - signed_val(MinusOp2);
- ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
- if (MY_IS_SSMALL(i)) {
- result = make_small(i);
- StoreBifResult(4, result);
- }
- }
- DO_OUTLINED_ARITH_2(mixed_minus, MinusOp1, MinusOp2);
- }
-
- {
- Eterm is_eq_exact_lit_val;
-
- OpCase(i_is_eq_exact_literal_fxc):
- is_eq_exact_lit_val = xb(Arg(1));
- goto do_is_eq_exact_literal;
-
- OpCase(i_is_eq_exact_literal_fyc):
- is_eq_exact_lit_val = yb(Arg(1));
- goto do_is_eq_exact_literal;
-
- do_is_eq_exact_literal:
- if (!eq(Arg(2), is_eq_exact_lit_val)) {
- ClauseFail();
- }
- Next(3);
- }
-
- {
- Eterm is_ne_exact_lit_val;
-
- OpCase(i_is_ne_exact_literal_fxc):
- is_ne_exact_lit_val = xb(Arg(1));
- goto do_is_ne_exact_literal;
-
- OpCase(i_is_ne_exact_literal_fyc):
- is_ne_exact_lit_val = yb(Arg(1));
- goto do_is_ne_exact_literal;
-
- do_is_ne_exact_literal:
- if (eq(Arg(2), is_ne_exact_lit_val)) {
- ClauseFail();
- }
- Next(3);
- }
-
- OpCase(i_move_call_only_fc): {
- r(0) = Arg(1);
- }
- /* FALL THROUGH */
- OpCase(i_call_only_f): {
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_last_fPc): {
- r(0) = Arg(2);
- }
- /* FALL THROUGH */
- OpCase(i_call_last_fP): {
- RESTORE_CP(E);
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_cf): {
- r(0) = Arg(0);
- I++;
- }
- /* FALL THROUGH */
- OpCase(i_call_f): {
- SET_CP(c_p, I+2);
- SET_I((BeamInstr *) Arg(0));
- DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
- Dispatch();
- }
-
- OpCase(i_move_call_ext_last_ePc): {
- r(0) = Arg(2);
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_last_eP):
- RESTORE_CP(E);
- E = ADD_BYTE_OFFSET(E, Arg(1));
-
- /*
- * Note: The pointer to the export entry is never NULL; if the module
- * is not loaded, it points to code which will invoke the error handler
- * (see lb_call_error_handler below).
- */
- DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
- Dispatchx();
-
- OpCase(i_move_call_ext_ce): {
- r(0) = Arg(0);
- I++;
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_e):
- SET_CP(c_p, I+2);
- DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
- Dispatchx();
-
- OpCase(i_move_call_ext_only_ec): {
- r(0) = Arg(1);
- }
- /* FALL THROUGH */
- OpCase(i_call_ext_only_e):
- DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0));
- Dispatchx();
-
- OpCase(init_y): {
- BeamInstr *next;
-
- PreFetch(1, next);
- make_blank(yb(Arg(0)));
- NextPF(1, next);
- }
-
- OpCase(i_trim_I): {
- BeamInstr *next;
- Uint words;
- Uint cp;
-
- words = Arg(0);
- cp = E[0];
- PreFetch(1, next);
- E += words;
- E[0] = cp;
- NextPF(1, next);
- }
-
- OpCase(move_x1_c): {
- x(1) = Arg(0);
- Next(1);
- }
-
- OpCase(move_x2_c): {
- x(2) = Arg(0);
- Next(1);
- }
-
- OpCase(return): {
- SET_I(c_p->cp);
- DTRACE_RETURN_FROM_PC(c_p);
- /*
-	 * We must clear the CP to make sure that a stale value does not
-	 * create a false module dependency preventing code upgrading.
- * It also means that we can use the CP in stack backtraces.
- */
- c_p->cp = 0;
- CHECK_TERM(r(0));
- HEAP_SPACE_VERIFIED(0);
- Goto(*I);
- }
-
- /*
- * Send is almost a standard call-BIF with two arguments, except for:
- * 1) It cannot be traced.
- * 2) There is no pointer to the send_2 function stored in
- * the instruction.
- */
-
- OpCase(send): {
- BeamInstr *next;
- Eterm result;
-
- if (!(FCALLS > 0 || FCALLS > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the bif */
- c_p->arity = 2;
- c_p->current = NULL;
- goto context_switch3;
- }
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- result = erl_send(c_p, r(0), x(1));
- PreFetch(0, next);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- HTOP = HEAP_TOP(c_p);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(0, next);
- } else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+1);
- SET_I(c_p->i);
- SWAPIN;
- Dispatch();
- }
- goto find_func_info;
- }
-
- {
- Eterm element_index;
- Eterm element_tuple;
-
- OpCase(i_element_jxsd):
- element_tuple = xb(Arg(1));
- goto do_element;
-
- OpCase(i_element_jysd):
- element_tuple = yb(Arg(1));
- goto do_element;
-
- do_element:
- GetArg1(2, element_index);
- if (is_small(element_index) && is_tuple(element_tuple)) {
- Eterm* tp = tuple_val(element_tuple);
-
- if ((signed_val(element_index) >= 1) &&
- (signed_val(element_index) <= arityval(*tp))) {
- Eterm result = tp[signed_val(element_index)];
- StoreBifResult(3, result);
- }
- }
- }
- /* Fall through */
-
- OpCase(badarg_j):
- badarg:
- c_p->freason = BADARG;
- goto lb_Cl_error;
-
- {
- Eterm fast_element_tuple;
-
- OpCase(i_fast_element_jxId):
- fast_element_tuple = xb(Arg(1));
- goto do_fast_element;
-
- OpCase(i_fast_element_jyId):
- fast_element_tuple = yb(Arg(1));
- goto do_fast_element;
-
- do_fast_element:
- if (is_tuple(fast_element_tuple)) {
- Eterm* tp = tuple_val(fast_element_tuple);
- Eterm pos = Arg(2); /* Untagged integer >= 1 */
- if (pos <= arityval(*tp)) {
- Eterm result = tp[pos];
- StoreBifResult(3, result);
- }
- }
- goto badarg;
- }
-
- OpCase(catch_yf):
- c_p->catches++;
- yb(Arg(0)) = Arg(1);
- Next(2);
-
- OpCase(catch_end_y): {
- c_p->catches--;
- make_blank(yb(Arg(0)));
- if (is_non_value(r(0))) {
- if (x(1) == am_throw) {
- r(0) = x(2);
- } else {
- if (x(1) == am_error) {
- SWAPOUT;
- x(2) = add_stacktrace(c_p, x(2), x(3));
- SWAPIN;
- }
- /* only x(2) is included in the rootset here */
- if (E - HTOP < 3) {
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- }
- r(0) = TUPLE2(HTOP, am_EXIT, x(2));
- HTOP += 3;
- }
- }
- CHECK_TERM(r(0));
- Next(1);
- }
-
- OpCase(try_end_y): {
- c_p->catches--;
- make_blank(yb(Arg(0)));
- if (is_non_value(r(0))) {
- r(0) = x(1);
- x(1) = x(2);
- x(2) = x(3);
- }
- Next(1);
- }
-
- /*
- * Skeleton for receive statement:
- *
- * recv_mark L1 Optional
- * call make_ref/monitor Optional
- * ...
- * recv_set L1 Optional
- * L1: <-------------------+
- * <-----------+ |
- * | |
- * loop_rec L2 ------+---+ |
- * ... | | |
- * remove_message | | |
- * jump L3 | | |
- * ... | | |
- * loop_rec_end L1 --+ | |
- * L2: <---------------+ |
- * wait L1 -----------------+ or wait_timeout
- * timeout
- *
- * L3: Code after receive...
- *
- *
- */
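For orientation, the optional recv_mark/recv_set pair is what the compiler emits for the common make_ref-before-receive pattern. An illustrative Erlang fragment (not emulator code):

    /* Illustrative Erlang source:
     *
     *     Ref = make_ref(),        %% recv_mark L1 ... recv_set L1
     *     receive
     *         {Ref, Reply} ->      %% loop_rec L2 ... remove_message
     *             Reply
     *     after 1000 ->            %% wait_timeout L1; timeout
     *             error
     *     end
     *
     * No message sent before make_ref/0 can contain Ref, so the saved
     * queue position lets the loop skip everything already in the queue
     * at that point.
     */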
-
- OpCase(recv_mark_f): {
- /*
-	 * Save the current position in the message buffer and
-	 * the label for the loop_rec/2 instruction for the
-	 * receive statement.
- */
- c_p->msg.mark = (BeamInstr *) Arg(0);
- c_p->msg.saved_last = c_p->msg.last;
- Next(1);
- }
-
- OpCase(i_recv_set): {
- /*
- * If the mark is valid (points to the loop_rec/2
- * instruction that follows), we know that the saved
- * position points to the first message that could
- * possibly be matched out.
- *
- * If the mark is invalid, we do nothing, meaning that
- * we will look through all messages in the message queue.
- */
- if (c_p->msg.mark == (BeamInstr *) (I+1)) {
- c_p->msg.save = c_p->msg.saved_last;
- }
- I++;
- /* Fall through to the loop_rec/2 instruction */
- }
-
- /*
- * Pick up the next message and place it in x(0).
- * If no message, jump to a wait or wait_timeout instruction.
- */
- OpCase(i_loop_rec_f):
- {
- BeamInstr *next;
- ErtsMessage* msgp;
-
- /*
- * We need to disable GC while matching messages
-	 * in the queue. This is because messages with data outside
-	 * the heap will be corrupted by a GC.
- */
- ASSERT(!(c_p->flags & F_DELAY_GC));
- c_p->flags |= F_DELAY_GC;
-
- loop_rec__:
-
- PROCESS_MAIN_CHK_LOCKS(c_p);
-
- msgp = PEEK_MESSAGE(c_p);
-
- if (!msgp) {
-#ifdef ERTS_SMP
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-	    /* Make sure messages won't pass exit signals... */
- if (ERTS_PROC_PENDING_EXIT(c_p)) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- SWAPOUT;
- c_p->flags &= ~F_DELAY_GC;
- goto do_schedule; /* Will be rescheduled for exit */
- }
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- msgp = PEEK_MESSAGE(c_p);
- if (msgp)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- else
-#endif
- {
- c_p->flags &= ~F_DELAY_GC;
- SET_I((BeamInstr *) Arg(0));
- Goto(*I); /* Jump to a wait or wait_timeout instruction */
- }
- }
- if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
- SWAPOUT; /* erts_decode_dist_message() may write to heap... */
- if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) {
- /*
- * A corrupt distribution message that we weren't able to decode;
- * remove it...
- */
- /* No swapin should be needed */
- ASSERT(HTOP == c_p->htop && E == c_p->stop);
- /* TODO: Add DTrace probe for this bad message situation? */
- UNLINK_MESSAGE(c_p, msgp);
- msgp->next = NULL;
- erts_cleanup_messages(msgp);
- goto loop_rec__;
- }
- SWAPIN;
- }
- PreFetch(1, next);
- r(0) = ERL_MESSAGE_TERM(msgp);
- NextPF(1, next);
- }
-
- /*
- * Remove a (matched) message from the message queue.
- */
- OpCase(remove_message): {
- BeamInstr *next;
- ErtsMessage* msgp;
- PROCESS_MAIN_CHK_LOCKS(c_p);
-
- ERTS_CHK_MBUF_SZ(c_p);
-
- PreFetch(0, next);
- msgp = PEEK_MESSAGE(c_p);
-
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
- save_calls(c_p, &exp_receive);
- }
- if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
-#ifdef USE_VM_PROBES
- if (DT_UTAG(c_p) != NIL) {
- if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
- SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
- } else {
- DT_UTAG(c_p) = NIL;
- SEQ_TRACE_TOKEN(c_p) = NIL;
- }
- } else {
-#endif
- SEQ_TRACE_TOKEN(c_p) = NIL;
-#ifdef USE_VM_PROBES
- }
- DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
-#endif
- } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
- Eterm msg;
- SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
-#ifdef USE_VM_PROBES
- if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
- if (DT_UTAG(c_p) == NIL) {
- DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
- }
- DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
- } else {
-#endif
- ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
- ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
- ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
- ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
- c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
- if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
- c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
- }
- msg = ERL_MESSAGE_TERM(msgp);
- seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
- c_p->common.id, c_p);
-#ifdef USE_VM_PROBES
- }
-#endif
- }
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_receive)) {
- Eterm token2 = NIL;
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-
- dtrace_proc_str(c_p, receiver_name);
- token2 = SEQ_TRACE_TOKEN(c_p);
- if (have_seqtrace(token2)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token2));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
- }
- DTRACE6(message_receive,
- receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
- c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial);
- }
-#endif
- UNLINK_MESSAGE(c_p, msgp);
- JOIN_MESSAGE(c_p);
- CANCEL_TIMER(c_p);
-
- erts_save_message_in_proc(c_p, msgp);
- c_p->flags &= ~F_DELAY_GC;
-
- if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) {
- /*
- * We want to GC soon but we leave a few
- * reductions giving the message some time
- * to turn into garbage.
- */
- ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS);
- }
-
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- ERTS_CHK_MBUF_SZ(c_p);
-
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- NextPF(0, next);
- }
-
- /*
- * Advance the save pointer to the next message (the current
- * message didn't match), then jump to the loop_rec instruction.
- */
- OpCase(loop_rec_end_f): {
-
- ASSERT(c_p->flags & F_DELAY_GC);
-
- SET_I((BeamInstr *) Arg(0));
- SAVE_MESSAGE(c_p);
- if (FCALLS > 0 || FCALLS > neg_o_reds) {
- FCALLS--;
- goto loop_rec__;
- }
-
- c_p->flags &= ~F_DELAY_GC;
- c_p->i = I;
- SWAPOUT;
- c_p->arity = 0;
- c_p->current = NULL;
- goto do_schedule;
- }
- /*
- * Prepare to wait for a message or a timeout, whichever occurs first.
- *
-	 * Note: In order to maintain compatibility between 32-bit and 64-bit
-	 * emulators, only timeout values that can be represented in 32 bits
- * (unsigned) or less are allowed.
- */
-
-
- OpCase(i_wait_timeout_fs): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-
- /* Fall through */
- }
- OpCase(i_wait_timeout_locked_fs): {
- Eterm timeout_value;
-
- /*
- * If we have already set the timer, we must NOT set it again. Therefore,
- * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
- */
- if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
- goto wait2;
- }
- GetArg1(1, timeout_value);
- if (timeout_value != make_small(0)) {
-
- if (timeout_value == am_infinity)
- c_p->flags |= F_TIMO;
- else {
- int tres = erts_set_proc_timer_term(c_p, timeout_value);
- if (tres == 0) {
- /*
-		     * The timer routine will set c_p->i to the value in
- * c_p->def_arg_reg[0]. Note that it is safe to use this
- * location because there are no living x registers in
- * a receive statement.
- */
- BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
- *pi = I+3;
- }
- else { /* Wrong time */
- OpCase(i_wait_error_locked): {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- /* Fall through */
- }
- OpCase(i_wait_error): {
- c_p->freason = EXC_TIMEOUT_VALUE;
- goto find_func_info;
- }
- }
- }
-
- /*
- * Prepare to wait indefinitely for a new message to arrive
- * (or the time set above if falling through from above).
- *
- * When a new message arrives, control will be transferred
-	 * When a new message arrives, control will be transferred to
-	 * the loop_rec instruction (at label L1). In case of
-	 * timeout, control will be transferred to the timeout
- */
-
- OpCase(wait_locked_f):
- OpCase(wait_f):
-
- wait2: {
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
-	     * In the non-SMP case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we do *not* want to clear
- * the active flag, which will make the process hang
- * in limbo forever.
- */
- SWAPOUT;
- goto do_schedule;
- }
-#endif
- c_p->i = (BeamInstr *) Arg(0); /* L1 */
- SWAPOUT;
- c_p->arity = 0;
-
- if (!ERTS_PTMR_IS_TIMED_OUT(c_p))
- erts_smp_atomic32_read_band_relb(&c_p->state,
- ~ERTS_PSFLG_ACTIVE);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- c_p->current = NULL;
- goto do_schedule;
- }
- OpCase(wait_unlocked_f): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- goto wait2;
- }
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- Next(2);
- }
-
- OpCase(i_wait_timeout_fI): {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- }
-
- OpCase(i_wait_timeout_locked_fI):
- {
- /*
- * If we have already set the timer, we must NOT set it again. Therefore,
- * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
- */
- if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
- BeamInstr** p = (BeamInstr **) c_p->def_arg_reg;
- *p = I+3;
- erts_set_proc_timer_uword(c_p, Arg(1));
- }
- goto wait2;
- }
-
- /*
- * A timeout has occurred. Reset the save pointer so that the next
- * receive statement will examine the first message first.
- */
- OpCase(timeout_locked): {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- }
-
- OpCase(timeout): {
- BeamInstr *next;
-
- PreFetch(0, next);
- if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
- trace_receive(c_p, am_clock_service, am_timeout, NULL);
- }
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
- save_calls(c_p, &exp_timeout);
- }
- c_p->flags &= ~F_TIMO;
- JOIN_MESSAGE(c_p);
- NextPF(0, next);
- }
-
- {
- Eterm select_val2;
-
- OpCase(i_select_tuple_arity2_yfAAff):
- select_val2 = yb(Arg(0));
- goto do_select_tuple_arity2;
-
- OpCase(i_select_tuple_arity2_xfAAff):
- select_val2 = xb(Arg(0));
- goto do_select_tuple_arity2;
-
- do_select_tuple_arity2:
- if (is_not_tuple(select_val2)) {
- goto select_val2_fail;
- }
- select_val2 = *tuple_val(select_val2);
- goto do_select_val2;
-
- OpCase(i_select_val2_yfccff):
- select_val2 = yb(Arg(0));
- goto do_select_val2;
-
- OpCase(i_select_val2_xfccff):
- select_val2 = xb(Arg(0));
- goto do_select_val2;
-
- do_select_val2:
- if (select_val2 == Arg(2)) {
- I += 3;
- } else if (select_val2 == Arg(3)) {
- I += 4;
- }
-
- select_val2_fail:
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- {
- Eterm select_val;
-
- OpCase(i_select_tuple_arity_xfI):
- select_val = xb(Arg(0));
- goto do_select_tuple_arity;
-
- OpCase(i_select_tuple_arity_yfI):
- select_val = yb(Arg(0));
- goto do_select_tuple_arity;
-
- do_select_tuple_arity:
- if (is_tuple(select_val)) {
- select_val = *tuple_val(select_val);
- goto do_linear_search;
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
-
- OpCase(i_select_val_lins_xfI):
- select_val = xb(Arg(0));
- goto do_linear_search;
-
- OpCase(i_select_val_lins_yfI):
- select_val = yb(Arg(0));
- goto do_linear_search;
-
- do_linear_search: {
- BeamInstr *vs = &Arg(3);
- int ix = 0;
-
- for(;;) {
- if (vs[ix+0] >= select_val) { ix += 0; break; }
- if (vs[ix+1] >= select_val) { ix += 1; break; }
- ix += 2;
- }
-
- if (vs[ix] == select_val) {
- I += ix + Arg(2) + 2;
- }
-
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- OpCase(i_select_val_bins_xfI):
- select_val = xb(Arg(0));
- goto do_binary_search;
-
- OpCase(i_select_val_bins_yfI):
- select_val = yb(Arg(0));
- goto do_binary_search;
-
- do_binary_search:
- {
- struct Pairs {
- BeamInstr val;
- BeamInstr* addr;
- };
- struct Pairs* low;
- struct Pairs* high;
- struct Pairs* mid;
- int bdiff; /* int not long because the arrays aren't that large */
-
- low = (struct Pairs *) &Arg(3);
- high = low + Arg(2);
-
- /* The pointer subtraction (high-low) below must produce
- * a signed result, because high could be < low. That
- * requires the compiler to insert quite a bit of code.
- *
- * However, high will be > low so the result will be
- * positive. We can use that knowledge to optimise the
- * entire sequence, from the initial comparison to the
- * computation of mid.
- *
- * -- Mikael Pettersson, Acumem AB
- *
- * Original loop control code:
- *
- * while (low < high) {
- * mid = low + (high-low) / 2;
- *
- */
- while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
- unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
-
- mid = (struct Pairs*)((char*)low + boffset);
- if (select_val < mid->val) {
- high = mid;
- } else if (select_val > mid->val) {
- low = mid + 1;
- } else {
- SET_I(mid->addr);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
- }
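The optimisation described in the comment can be shown in isolation: once the loop guard has established that the byte difference is positive, the midpoint offset is one unsigned shift and mask instead of a signed division by sizeof(struct Pairs). A standalone sketch (assumes the pair size is a power of two, which holds for a two-word struct):

    struct pair { unsigned long val; void *addr; };

    /* Original form: signed pointer arithmetic forces the compiler to
     * cope with a (mathematically impossible) negative high - low. */
    static struct pair *mid_signed(struct pair *low, struct pair *high)
    {
        return low + (high - low) / 2;
    }

    /* Optimised form from the comment above: the loop guard already
     * proved bdiff > 0, so unsigned arithmetic is safe. */
    static struct pair *mid_unsigned(struct pair *low, struct pair *high)
    {
        int bdiff = (int)((char *)high - (char *)low);
        unsigned boffset = ((unsigned)bdiff >> 1) & ~(sizeof(struct pair) - 1);
        return (struct pair *)((char *)low + boffset);
    }

    int main(void)
    {
        struct pair a[4] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };
        return mid_signed(a, a + 4) == mid_unsigned(a, a + 4) ? 0 : 1;
    }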
-
- {
- Eterm jump_on_val_zero_index;
-
- OpCase(i_jump_on_val_zero_yfI):
- jump_on_val_zero_index = yb(Arg(0));
- goto do_jump_on_val_zero_index;
-
- OpCase(i_jump_on_val_zero_xfI):
- jump_on_val_zero_index = xb(Arg(0));
- goto do_jump_on_val_zero_index;
-
- do_jump_on_val_zero_index:
- if (is_small(jump_on_val_zero_index)) {
- jump_on_val_zero_index = signed_val(jump_on_val_zero_index);
- if (jump_on_val_zero_index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- {
- Eterm jump_on_val_index;
-
-
- OpCase(i_jump_on_val_yfII):
- jump_on_val_index = yb(Arg(0));
- goto do_jump_on_val_index;
-
- OpCase(i_jump_on_val_xfII):
- jump_on_val_index = xb(Arg(0));
- goto do_jump_on_val_index;
-
- do_jump_on_val_index:
- if (is_small(jump_on_val_index)) {
- jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3));
- if (jump_on_val_index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]);
- Goto(*I);
- }
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- do_put_tuple: {
- Eterm* hp = HTOP;
-
- *hp++ = make_arityval(pt_arity);
-
- do {
- Eterm term = *I++;
- switch (loader_tag(term)) {
- case LOADER_X_REG:
- *hp++ = x(loader_x_reg_index(term));
- break;
- case LOADER_Y_REG:
- *hp++ = y(loader_y_reg_index(term));
- break;
- default:
- *hp++ = term;
- break;
- }
- } while (--pt_arity != 0);
- HTOP = hp;
- Goto(*I);
- }
-
- OpCase(new_map_dII): {
- Eterm res;
-
- HEAVY_SWAPOUT;
- res = new_map(c_p, reg, I-1);
- HEAVY_SWAPIN;
- StoreResult(res, Arg(0));
- Next(3+Arg(2));
- }
-
-#define PUT_TERM_REG(term, desc) \
-do { \
- switch (loader_tag(desc)) { \
- case LOADER_X_REG: \
- x(loader_x_reg_index(desc)) = (term); \
- break; \
- case LOADER_Y_REG: \
- y(loader_y_reg_index(desc)) = (term); \
- break; \
- default: \
- ASSERT(0); \
- break; \
- } \
-} while(0)
-
- OpCase(i_get_map_elements_fsI): {
- Eterm map;
- BeamInstr *fs;
- Uint sz, n;
-
- GetArg1(1, map);
-
-	/* This instruction assumes Arg1 is a map,
-	 * i.e. that it is preceded by an is_map test when needed.
- */
-
- n = (Uint)Arg(2) / 3;
- fs = &Arg(3); /* pattern fields and target registers */
-
- if (is_flatmap(map)) {
- flatmap_t *mp;
- Eterm *ks;
- Eterm *vs;
-
- mp = (flatmap_t *)flatmap_val(map);
- sz = flatmap_get_size(mp);
-
- if (sz == 0) {
- ClauseFail();
- }
-
- ks = flatmap_get_keys(mp);
- vs = flatmap_get_values(mp);
-
- while(sz) {
- if (EQ((Eterm) fs[0], *ks)) {
- PUT_TERM_REG(*vs, fs[1]);
- n--;
- fs += 3;
- /* no more values to fetch, we are done */
- if (n == 0) {
- I = fs;
- Next(-1);
- }
- }
- ks++, sz--, vs++;
- }
-
- ClauseFail();
- } else {
- const Eterm *v;
- Uint32 hx;
- ASSERT(is_hashmap(map));
- while(n--) {
- hx = fs[2];
- ASSERT(hx == hashmap_make_hash((Eterm)fs[0]));
- if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) {
- ClauseFail();
- }
- PUT_TERM_REG(*v, fs[1]);
- fs += 3;
- }
- I = fs;
- Next(-1);
- }
- }
-#undef PUT_TERM_REG
-
- OpCase(update_map_assoc_jsdII): {
- Eterm res;
- Eterm map;
-
- GetArg1(1, map);
- HEAVY_SWAPOUT;
- res = update_map_assoc(c_p, reg, map, I);
- HEAVY_SWAPIN;
- if (is_value(res)) {
- StoreResult(res, Arg(2));
- Next(5+Arg(4));
- } else {
- /*
- * This can only happen if the code was compiled
- * with the compiler in OTP 17.
- */
- c_p->freason = BADMAP;
- c_p->fvalue = map;
- goto lb_Cl_error;
- }
- }
-
- OpCase(update_map_exact_jsdII): {
- Eterm res;
- Eterm map;
-
- GetArg1(1, map);
- HEAVY_SWAPOUT;
- res = update_map_exact(c_p, reg, map, I);
- HEAVY_SWAPIN;
- if (is_value(res)) {
- StoreResult(res, Arg(2));
- Next(5+Arg(4));
- } else {
- goto lb_Cl_error;
- }
- }
-
-
- /*
- * All guards with zero arguments have special instructions:
- * self/0
- * node/0
- *
- * All other guard BIFs take one or two arguments.
- */
-
- /*
- * Guard BIF in head. On failure, ignore the error and jump
- * to the code for the next clause. We don't support tracing
- * of guard BIFs.
- */
-
- OpCase(bif1_fbsd):
- {
- Eterm (*bf)(Process*, Eterm*);
- Eterm tmp_reg[1];
- Eterm result;
-
- GetArg1(2, tmp_reg[0]);
- bf = (BifFunction) Arg(1);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(3, result);
- }
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
-
- /*
- * Guard BIF in body. It can fail like any BIF. No trace support.
- */
-
- OpCase(bif1_body_bsd):
- {
- Eterm (*bf)(Process*, Eterm*);
-
- Eterm tmp_reg[1];
- Eterm result;
-
- GetArg1(1, tmp_reg[0]);
- bf = (BifFunction) Arg(0);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(2, result);
- }
- reg[0] = tmp_reg[0];
- SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif1_jIsId):
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) Arg(3);
-
- GetArg1(2, x(live));
- bf = (GcBifFunction) Arg(1);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_CHK_MBUF_SZ(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(4, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- x(0) = x(live);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif2_jIIssd): /* Note, one less parameter than the i_gc_bif1
- and i_gc_bif3 */
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) Arg(2);
-
- GetArg2(3, x(live), x(live+1));
- /*
- * XXX This calling convention does not make sense. 'live'
-	 * should point at the first argument, not the second
- * (i.e. 'live' should not be incremented below).
- */
- live++;
- bf = (GcBifFunction) Arg(1);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_CHK_MBUF_SZ(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(5, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- live--;
- x(0) = x(live);
- x(1) = x(live+1);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- OpCase(i_gc_bif3_jIIssd):
- {
- typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
- GcBifFunction bf;
- Eterm result;
- Uint live = (Uint) Arg(2);
-
- x(live) = x(SCRATCH_X_REG);
- GetArg2(3, x(live+1), x(live+2));
- /*
- * XXX This calling convention does not make sense. 'live'
-	 * should point at the first argument, not the third
- * (i.e. 'live' should not be incremented below).
- */
- live += 2;
- bf = (GcBifFunction) Arg(1);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, live);
- ERTS_CHK_MBUF_SZ(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(5, result);
- }
- if (Arg(0) != 0) {
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- live -= 2;
- x(0) = x(live);
- x(1) = x(live+1);
- x(2) = x(live+2);
- I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
- goto post_error_handling;
- }
-
- /*
- * Guards bifs and, or, xor in guards.
- */
- OpCase(i_bif2_fbssd):
- {
- Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
- Eterm result;
-
- GetArg2(2, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(1);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(result)) {
- StoreBifResult(4, result);
- }
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
-
- /*
- * Guards bifs and, or, xor, relational operators in body.
- */
- OpCase(i_bif2_body_bssd):
- {
- Eterm tmp_reg[2];
- Eterm (*bf)(Process*, Eterm*);
- Eterm result;
-
- GetArg2(1, tmp_reg[0], tmp_reg[1]);
- bf = (BifFunction) Arg(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, tmp_reg);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_HOLE_CHECK(c_p);
- if (is_value(result)) {
- ASSERT(!is_CP(result));
- StoreBifResult(3, result);
- }
- reg[0] = tmp_reg[0];
- reg[1] = tmp_reg[1];
- SWAPOUT;
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- /*
- * The most general BIF call. The BIF may build any amount of data
- * on the heap. The result is always returned in r(0).
- */
- OpCase(call_bif_e):
- {
- Eterm (*bf)(Process*, Eterm*, BeamInstr*);
- Eterm result;
- BeamInstr *next;
- ErlHeapFragment *live_hf_end;
-
-
- if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the bif */
- c_p->arity = ((Export *)Arg(0))->code[2];
- c_p->current = ((Export *)Arg(0))->code;
- goto context_switch3;
- }
-
- ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(Arg(0)), GET_BIF_ADDRESS(Arg(0)));
-
- bf = GET_BIF_ADDRESS(Arg(0));
-
- PRE_BIF_SWAPOUT(c_p);
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- live_hf_end = c_p->mbuf;
- ERTS_CHK_MBUF_SZ(c_p);
- result = (*bf)(c_p, reg, I);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_HOLE_CHECK(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- if (ERTS_IS_GC_DESIRED(c_p)) {
- Uint arity = ((Export *)Arg(0))->code[2];
- result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity);
- E = c_p->stop;
- }
- PROCESS_MAIN_CHK_LOCKS(c_p);
- HTOP = HEAP_TOP(c_p);
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
-	/* We have to update the cache if msacc is enabled in order
-	   to make sure no bookkeeping is done after we have disabled
-	   it. We don't always do this as it is quite expensive. */
- if (ERTS_MSACC_IS_ENABLED_CACHED_X())
- ERTS_MSACC_UPDATE_CACHE_X();
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- SET_CP(c_p, I+2);
- SET_I(c_p->i);
- SWAPIN;
- Dispatch();
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- /*
- * Arithmetic operations.
- */
-
- OpCase(i_times_jIssd):
- {
- Eterm Op1, Op2;
- GetArg2(2, Op1, Op2);
- DO_OUTLINED_ARITH_2(mixed_times, Op1, Op2);
- }
-
- OpCase(i_m_div_jIssd):
- {
- Eterm Op1, Op2;
- GetArg2(2, Op1, Op2);
- DO_OUTLINED_ARITH_2(mixed_div, Op1, Op2);
- }
-
- OpCase(i_int_div_jIssd):
- {
- Eterm Op1, Op2;
-
- GetArg2(2, Op1, Op2);
- if (Op2 == SMALL_ZERO) {
- goto badarith;
- } else if (is_both_small(Op1, Op2)) {
- Sint ires = signed_val(Op1) / signed_val(Op2);
- if (MY_IS_SSMALL(ires)) {
- Eterm result = make_small(ires);
- StoreBifResult(4, result);
- }
- }
- DO_OUTLINED_ARITH_2(int_div, Op1, Op2);
- }
-
- {
- Eterm RemOp1, RemOp2;
-
- OpCase(i_rem_jIxxd):
- RemOp1 = xb(Arg(2));
- RemOp2 = xb(Arg(3));
- goto do_rem;
-
- OpCase(i_rem_jIssd):
- GetArg2(2, RemOp1, RemOp2);
- goto do_rem;
-
- do_rem:
- if (RemOp2 == SMALL_ZERO) {
- goto badarith;
- } else if (is_both_small(RemOp1, RemOp2)) {
- Eterm result = make_small(signed_val(RemOp1) % signed_val(RemOp2));
- StoreBifResult(4, result);
- } else {
- DO_OUTLINED_ARITH_2(int_rem, RemOp1, RemOp2);
- }
- }
-
- {
- Eterm BandOp1, BandOp2;
-
- OpCase(i_band_jIxcd):
- BandOp1 = xb(Arg(2));
- BandOp2 = Arg(3);
- goto do_band;
-
- OpCase(i_band_jIssd):
- GetArg2(2, BandOp1, BandOp2);
- goto do_band;
-
- do_band:
- if (is_both_small(BandOp1, BandOp2)) {
- /*
- * No need to untag -- TAG & TAG == TAG.
- */
- Eterm result = BandOp1 & BandOp2;
- StoreBifResult(4, result);
- }
- DO_OUTLINED_ARITH_2(band, BandOp1, BandOp2);
- }
-
- /*
- * An error occurred in an arithmetic operation or test that could
- * appear either in a head or in a body.
- * In a head, execution should continue at the failure address in Arg(0).
- * In a body, Arg(0) == 0 and an exception should be raised.
- */
- lb_Cl_error: {
- if (Arg(0) != 0) {
- OpCase(jump_f): {
- jump_f:
- SET_I((BeamInstr *) Arg(0));
- Goto(*I);
- }
- }
- ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
- goto find_func_info;
- }
-
- OpCase(i_bor_jIssd):
- {
- Eterm Op1, Op2;
-
- GetArg2(2, Op1, Op2);
- if (is_both_small(Op1, Op2)) {
- /*
- * No need to untag -- TAG | TAG == TAG.
- */
- Eterm result = Op1 | Op2;
- StoreBifResult(4, result);
- }
- DO_OUTLINED_ARITH_2(bor, Op1, Op2);
- }
-
- OpCase(i_bxor_jIssd):
- {
- Eterm Op1, Op2;
-
- GetArg2(2, Op1, Op2);
- if (is_both_small(Op1, Op2)) {
- /*
- * We could extract the tag from one argument, but a tag extraction
- * could mean a shift. Therefore, play it safe here.
- */
- Eterm result = make_small(signed_val(Op1) ^ signed_val(Op2));
- StoreBifResult(4, result);
- }
- DO_OUTLINED_ARITH_2(bxor, Op1, Op2);
- }
-
- {
- Eterm Op1, Op2;
- Sint i;
- Sint ires;
- Eterm* bigp;
- Eterm tmp_big[2];
-
- OpCase(i_bsr_jIssd):
- GetArg2(2, Op1, Op2);
- if (is_small(Op2)) {
- i = -signed_val(Op2);
- if (is_small(Op1)) {
- goto small_shift;
- } else if (is_big(Op1)) {
- if (i == 0) {
- StoreBifResult(4, Op1);
- }
- ires = big_size(Op1);
- goto big_shift;
- }
- } else if (is_big(Op2)) {
- /*
- * N bsr NegativeBigNum == N bsl MAX_SMALL
- * N bsr PositiveBigNum == N bsl MIN_SMALL
- */
- Op2 = make_small(bignum_header_is_neg(*big_val(Op2)) ?
- MAX_SMALL : MIN_SMALL);
- goto do_bsl;
- }
- goto badarith;
-
- OpCase(i_bsl_jIssd):
- GetArg2(2, Op1, Op2);
- do_bsl:
- if (is_small(Op2)) {
- i = signed_val(Op2);
-
- if (is_small(Op1)) {
- small_shift:
- ires = signed_val(Op1);
-
- if (i == 0 || ires == 0) {
- StoreBifResult(4, Op1);
- } else if (i < 0) { /* Right shift */
- i = -i;
- if (i >= SMALL_BITS-1) {
- Op1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
- } else {
- Op1 = make_small(ires >> i);
- }
- StoreBifResult(4, Op1);
- } else if (i < SMALL_BITS-1) { /* Left shift */
- if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
- ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
- Op1 = make_small(ires << i);
- StoreBifResult(4, Op1);
- }
- }
- ires = 1; /* big_size(small_to_big(Op1)) */
-
- big_shift:
- if (i > 0) { /* Left shift. */
- ires += (i / D_EXP);
- } else { /* Right shift. */
- if (ires <= (-i / D_EXP))
- ires = 3; /* ??? */
- else
- ires -= (-i / D_EXP);
- }
- {
- ires = BIG_NEED_SIZE(ires+1);
- /*
- * A slightly conservative check of the size to avoid
- * allocating huge amounts of memory for bignums that
- * clearly would overflow the arity in the header
- * word.
- */
- if (ires-8 > BIG_ARITY_MAX) {
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- TestHeapPreserve(ires+1, Arg(1), Op1);
- if (is_small(Op1)) {
- Op1 = small_to_big(signed_val(Op1), tmp_big);
- }
- bigp = HTOP;
- Op1 = big_lshift(Op1, i, bigp);
- if (is_big(Op1)) {
- HTOP += bignum_header_arity(*HTOP) + 1;
- }
- HEAP_SPACE_VERIFIED(0);
- if (is_nil(Op1)) {
- /*
- * This result must have been only slightly larger
- * than allowed since it wasn't caught by the
- * previous test.
- */
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- ERTS_HOLE_CHECK(c_p);
- StoreBifResult(4, Op1);
- }
- } else if (is_big(Op1)) {
- if (i == 0) {
- StoreBifResult(4, Op1);
- }
- ires = big_size(Op1);
- goto big_shift;
- }
- } else if (is_big(Op2)) {
- if (bignum_header_is_neg(*big_val(Op2))) {
- /*
- * N bsl NegativeBigNum is either 0 or -1, depending on
- * the sign of N. Since we don't believe this case
- * is common, do the calculation with the minimum
- * amount of code.
- */
- Op2 = make_small(MIN_SMALL);
- goto do_bsl;
- } else if (is_small(Op1) || is_big(Op1)) {
- /*
- * N bsl PositiveBigNum is too large to represent.
- */
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
- }
- /* Fall through if the left argument is not an integer. */
- }
- /*
- * One or more non-integer arguments.
- */
- goto badarith;
- }
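/* Worked examples of the clamping above (illustrative only): a bignum's
 * magnitude exceeds every shift count that can matter, so only the signs
 * of the operands decide the outcome.
 *
 *    7 bsr  (1 bsl 64)  ==   7 bsl MIN_SMALL  ==  0
 *   -7 bsr  (1 bsl 64)  ==  -7 bsl MIN_SMALL  == -1
 *    7 bsr -(1 bsl 64)  ==   7 bsl MAX_SMALL  ->  system_limit
 */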
-
- OpCase(i_int_bnot_jsId):
- {
- Eterm bnot_val;
-
- GetArg1(1, bnot_val);
- if (is_small(bnot_val)) {
- bnot_val = make_small(~signed_val(bnot_val));
- } else {
- Uint live = Arg(2);
- HEAVY_SWAPOUT;
- reg[live] = bnot_val;
- bnot_val = erts_gc_bnot(c_p, reg, live);
- HEAVY_SWAPIN;
- ERTS_HOLE_CHECK(c_p);
- if (is_nil(bnot_val)) {
- goto lb_Cl_error;
- }
- }
- StoreBifResult(3, bnot_val);
- }
-
- badarith:
- c_p->freason = BADARITH;
- goto lb_Cl_error;
-
- OpCase(i_apply): {
- BeamInstr *next;
- HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, I+1);
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_last_P): {
- BeamInstr *next;
- HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, (BeamInstr *) E[0]);
- E = ADD_BYTE_OFFSET(E, Arg(0));
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_only): {
- BeamInstr *next;
- HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(apply_I): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, I+2);
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(apply_last_IP): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, (BeamInstr *) E[0]);
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I(next);
- Dispatch();
- }
- I = handle_error(c_p, I, reg, apply_3);
- goto post_error_handling;
- }
-
- OpCase(i_apply_fun): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, I+1);
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_apply_fun_last_P): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, (BeamInstr *) E[0]);
- E = ADD_BYTE_OFFSET(E, Arg(0));
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_apply_fun_only): {
- BeamInstr *next;
- HEAVY_SWAPOUT;
- next = apply_fun(c_p, r(0), x(1), reg);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_call_fun_I): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, I+2);
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
-
- OpCase(i_call_fun_last_IP): {
- BeamInstr *next;
-
- HEAVY_SWAPOUT;
- next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_CP(c_p, (BeamInstr *) E[0]);
- E = ADD_BYTE_OFFSET(E, Arg(1));
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
+#include "beam_hot.h"
#ifdef DEBUG
/*
@@ -3361,14 +805,15 @@ do { \
* called from I[-3], I[-2], and I[-1] respectively.
*/
context_switch_fun:
- c_p->arity = I[-1] + 1;
+ /* Add one for the environment of the fun */
+ c_p->arity = erts_code_to_codemfa(I)->arity + 1;
goto context_switch2;
context_switch:
- c_p->arity = I[-1];
+ c_p->arity = erts_code_to_codemfa(I)->arity;
- context_switch2: /* Entry for fun calls. */
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ context_switch2: /* Entry for fun calls. */
+ c_p->current = erts_code_to_codemfa(I);
context_switch3:
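/* A sketch of the erts_code_to_codemfa() helper used above, assuming each
 * function's first instruction is immediately preceded by its ErtsCodeInfo
 * header; the real inline function lives in the loader headers, so treat
 * this as illustrative only. */
static ErtsCodeMFA *code_to_codemfa_sketch(BeamInstr *I)
{
    ErtsCodeInfo *ci = (ErtsCodeInfo *) (((char *) I) - sizeof(ErtsCodeInfo));
    return &ci->mfa;
}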
@@ -3376,7 +821,7 @@ do { \
Eterm* argp;
int i;
- if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
+ if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
c_p->i = beam_exit;
c_p->arity = 0;
c_p->current = NULL;
@@ -3433,64 +878,25 @@ do { \
goto do_schedule1;
}
- OpCase(set_tuple_element_sdP): {
- Eterm element;
- Eterm tuple;
- BeamInstr *next;
- Eterm* p;
-
- PreFetch(3, next);
- GetArg1(0, element);
- tuple = REG_TARGET(Arg(1));
- ASSERT(is_tuple(tuple));
- p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2));
- *p = element;
- NextPF(3, next);
- }
+#include "beam_warm.h"
OpCase(normal_exit): {
SWAPOUT;
c_p->freason = EXC_NORMAL;
- c_p->arity = 0; /* In case this process will never be garbed again. */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ c_p->arity = 0; /* In case this process will ever be garbed again. */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_do_exit_process(c_p, am_normal);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
goto do_schedule;
}
OpCase(continue_exit): {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
erts_continue_exit_process(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
goto do_schedule;
}
- OpCase(i_raise): {
- Eterm raise_trace = x(2);
- Eterm raise_value = x(1);
- struct StackTrace *s;
-
- c_p->fvalue = raise_value;
- c_p->ftrace = raise_trace;
- s = get_trace_from_exc(raise_trace);
- if (s == NULL) {
- c_p->freason = EXC_ERROR;
- } else {
- c_p->freason = PRIMARY_EXCEPTION(s->freason);
- }
- goto find_func_info;
- }
-
- {
- Eterm badmatch_val;
-
- OpCase(badmatch_x):
- badmatch_val = xb(Arg(0));
- c_p->fvalue = badmatch_val;
- c_p->freason = BADMATCH;
- }
- /* Fall through here */
-
find_func_info: {
SWAPOUT;
I = handle_error(c_p, I, reg, NULL);
@@ -3509,7 +915,8 @@ do { \
* code[4]: Not used
*/
HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_undefined_function);
+ I = call_error_handler(c_p, erts_code_to_codemfa(I),
+ reg, am_undefined_function);
HEAVY_SWAPIN;
if (I) {
Goto(*I);
@@ -3530,1556 +937,15 @@ do { \
}
}
- {
- Eterm nif_bif_result;
- Eterm bif_nif_arity;
-
- OpCase(call_nif):
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- * I[3]: Function pointer to dirty NIF
- */
- BifFunction vbf;
- ErlHeapFragment *live_hf_end;
-
- if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the nif */
- goto context_switch;
- }
-
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
-
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
-#ifdef ERTS_SMP
- ASSERT(c_p->scheduler_data);
-#endif
- live_hf_end = c_p->mbuf;
- ERTS_CHK_MBUF_SZ(c_p);
- erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
- nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
- if (env.exception_thrown)
- nif_bif_result = THE_NON_VALUE;
- erts_post_nif(&env);
- ERTS_CHK_MBUF_SZ(c_p);
-
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- ASSERT(!env.exiting);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- }
-
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- goto apply_bif_or_nif_epilogue;
-
- OpCase(apply_bif):
- /*
- * At this point, I points to the code[3] in the export entry for
- * the BIF:
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&apply_bif
- * code[4]: Function pointer to BIF function
- */
-
- if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the bif */
- goto context_switch;
- }
-
- ERTS_MSACC_SET_BIF_STATE_CACHED_X((Eterm)I[-3], (BifFunction)Arg(0));
-
- c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
- c_p->i = I; /* In case we apply check_process_code/2. */
- c_p->arity = 0; /* To allow garbage collection on ourselves
- * (check_process_code/2).
- */
- DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
-
- SWAPOUT;
- ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
- c_p->fcalls = FCALLS - 1;
- vbf = (BifFunction) Arg(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- bif_nif_arity = I[-1];
- ASSERT(bif_nif_arity <= 4);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- {
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- live_hf_end = c_p->mbuf;
- ERTS_CHK_MBUF_SZ(c_p);
- nif_bif_result = (*bf)(c_p, reg, I);
- ERTS_CHK_MBUF_SZ(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- /* We have to update the cache while msacc is still enabled in order
- to make sure no bookkeeping is done after we have disabled it.
- We don't always do this as it is quite expensive. */
- if (ERTS_MSACC_IS_ENABLED_CACHED_X())
- ERTS_MSACC_UPDATE_CACHE_X();
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
-
- apply_bif_or_nif_epilogue:
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_HOLE_CHECK(c_p);
- if (ERTS_IS_GC_DESIRED(c_p)) {
- nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end,
- nif_bif_result,
- reg, bif_nif_arity);
- }
- SWAPIN; /* There might have been a garbage collection. */
- FCALLS = c_p->fcalls;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- if (is_value(nif_bif_result)) {
- r(0) = nif_bif_result;
- CHECK_TERM(r(0));
- SET_I(c_p->cp);
- c_p->cp = 0;
- Goto(*I);
- } else if (c_p->freason == TRAP) {
- SET_I(c_p->i);
- if (c_p->flags & F_HIBERNATE_SCHED) {
- c_p->flags &= ~F_HIBERNATE_SCHED;
- goto do_schedule;
- }
- Dispatch();
- }
- I = handle_error(c_p, c_p->cp, reg, vbf);
- goto post_error_handling;
- }
- }
-
- OpCase(i_get_sd):
- {
- Eterm arg;
- Eterm result;
-
- GetArg1(0, arg);
- result = erts_pd_hash_get(c_p, arg);
- StoreBifResult(1, result);
- }
-
- OpCase(i_get_hash_cId):
- {
- Eterm arg;
- Eterm result;
-
- GetArg1(0, arg);
- result = erts_pd_hash_get_with_hx(c_p, Arg(1), arg);
- StoreBifResult(2, result);
- }
-
- {
- Eterm case_end_val;
-
- OpCase(case_end_x):
- case_end_val = xb(Arg(0));
- c_p->fvalue = case_end_val;
- c_p->freason = EXC_CASE_CLAUSE;
- goto find_func_info;
- }
-
- OpCase(if_end):
- c_p->freason = EXC_IF_CLAUSE;
- goto find_func_info;
-
OpCase(i_func_info_IaaI): {
+ ErtsCodeInfo *ci = (ErtsCodeInfo*)I;
c_p->freason = EXC_FUNCTION_CLAUSE;
- c_p->current = I + 2;
+ c_p->current = &ci->mfa;
goto handle_error;
}
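/* A simplified sketch of the header cast to above; the real ErtsCodeInfo
 * carries extra fields (e.g. breakpoint data), so this only shows the
 * parts the code relies on. */
typedef struct {
    BeamInstr op;        /* OpCode(i_func_info_IaaI) */
    ErtsCodeMFA mfa;     /* module, function and arity of the function */
} ErtsCodeInfo_sketch;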
- OpCase(try_case_end_s):
- {
- Eterm try_case_end_val;
- GetArg1(0, try_case_end_val);
- c_p->fvalue = try_case_end_val;
- c_p->freason = EXC_TRY_CLAUSE;
- goto find_func_info;
- }
-
- /*
- * Construction of binaries using new instructions.
- */
- {
- Eterm new_binary;
- Eterm num_bits_term;
- Uint num_bits;
- Uint alloc;
- Uint num_bytes;
-
- OpCase(i_bs_init_bits_heap_IIId): {
- num_bits = Arg(0);
- alloc = Arg(1);
- I++;
- goto do_bs_init_bits_known;
- }
-
- OpCase(i_bs_init_bits_IId): {
- num_bits = Arg(0);
- alloc = 0;
- goto do_bs_init_bits_known;
- }
-
- OpCase(i_bs_init_bits_fail_heap_sIjId): {
- GetArg1(0, num_bits_term);
- alloc = Arg(1);
- I += 2;
- goto do_bs_init_bits;
- }
-
- OpCase(i_bs_init_bits_fail_yjId): {
- num_bits_term = yb(Arg(0));
- I++;
- alloc = 0;
- goto do_bs_init_bits;
- }
- OpCase(i_bs_init_bits_fail_xjId): {
- num_bits_term = xb(Arg(0));
- I++;
- alloc = 0;
- /* FALL THROUGH */
- }
-
- /* num_bits_term = Term for number of bits to build (small/big)
- * alloc = Number of words to allocate on heap
- * Operands: Fail Live Dst
- */
-
- do_bs_init_bits:
- if (is_small(num_bits_term)) {
- Sint size = signed_val(num_bits_term);
- if (size < 0) {
- goto badarg;
- }
- num_bits = (Uint) size;
- } else {
- Uint bits;
-
- if (!term_to_Uint(num_bits_term, &bits)) {
- c_p->freason = bits;
- goto lb_Cl_error;
-
- }
- num_bits = (Eterm) bits;
- }
-
- /* num_bits = Number of bits to build
- * alloc = Number of extra words to allocate on heap
- * Operands: NotUsed Live Dst
- */
- do_bs_init_bits_known:
- num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
- if (num_bits & 7) {
- alloc += ERL_SUB_BIN_SIZE;
- }
- if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
- alloc += heap_bin_size(num_bytes);
- } else {
- alloc += PROC_BIN_SIZE;
- }
- TestHeap(alloc, Arg(1));
-
- /* num_bits = Number of bits to build
- * num_bytes = Number of bytes to allocate in the binary
- * alloc = Total number of words to allocate on heap
- * Operands: NotUsed NotUsed Dst
- */
- if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
- ErlHeapBin* hb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- hb = (ErlHeapBin *) HTOP;
- HTOP += heap_bin_size(num_bytes);
- hb->thing_word = header_heap_bin(num_bytes);
- hb->size = num_bytes;
- erts_current_bin = (byte *) hb->data;
- new_binary = make_binary(hb);
-
- do_bits_sub_bin:
- if (num_bits & 7) {
- ErlSubBin* sb;
-
- sb = (ErlSubBin *) HTOP;
- HTOP += ERL_SUB_BIN_SIZE;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = num_bytes - 1;
- sb->bitsize = num_bits & 7;
- sb->offs = 0;
- sb->bitoffs = 0;
- sb->is_writable = 0;
- sb->orig = new_binary;
- new_binary = make_binary(sb);
- }
- HEAP_SPACE_VERIFIED(0);
- StoreBifResult(2, new_binary);
- } else {
- Binary* bptr;
- ProcBin* pb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
-
- /*
- * Allocate the binary struct itself.
- */
- bptr = erts_bin_nrml_alloc(num_bytes);
- erts_refc_init(&bptr->refc, 1);
- erts_current_bin = (byte *) bptr->orig_bytes;
-
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HTOP;
- HTOP += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = num_bytes;
- pb->next = MSO(c_p).first;
- MSO(c_p).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
- new_binary = make_binary(pb);
- goto do_bits_sub_bin;
- }
- }
-
- {
- Eterm BsOp1, BsOp2;
-
- OpCase(i_bs_init_fail_heap_sIjId): {
- GetArg1(0, BsOp1);
- BsOp2 = Arg(1);
- I += 2;
- goto do_bs_init;
- }
-
- OpCase(i_bs_init_fail_yjId): {
- BsOp1 = yb(Arg(0));
- BsOp2 = 0;
- I++;
- goto do_bs_init;
- }
-
- OpCase(i_bs_init_fail_xjId): {
- BsOp1 = xb(Arg(0));
- BsOp2 = 0;
- I++;
- }
- /* FALL THROUGH */
- do_bs_init:
- if (is_small(BsOp1)) {
- Sint size = signed_val(BsOp1);
- if (size < 0) {
- goto badarg;
- }
- BsOp1 = (Eterm) size;
- } else {
- Uint bytes;
-
- if (!term_to_Uint(BsOp1, &bytes)) {
- c_p->freason = bytes;
- goto lb_Cl_error;
- }
- if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
- goto system_limit;
- }
- BsOp1 = (Eterm) bytes;
- }
- if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) {
- goto do_heap_bin_alloc;
- } else {
- goto do_proc_bin_alloc;
- }
-
-
- OpCase(i_bs_init_heap_IIId): {
- BsOp1 = Arg(0);
- BsOp2 = Arg(1);
- I++;
- goto do_proc_bin_alloc;
- }
-
- OpCase(i_bs_init_IId): {
- BsOp1 = Arg(0);
- BsOp2 = 0;
- }
- /* FALL THROUGH */
- do_proc_bin_alloc: {
- Binary* bptr;
- ProcBin* pb;
-
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- TestBinVHeap(BsOp1 / sizeof(Eterm),
- BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1));
-
- /*
- * Allocate the binary struct itself.
- */
- bptr = erts_bin_nrml_alloc(BsOp1);
- erts_refc_init(&bptr->refc, 1);
- erts_current_bin = (byte *) bptr->orig_bytes;
-
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HTOP;
- HTOP += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = BsOp1;
- pb->next = MSO(c_p).first;
- MSO(c_p).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
-
- OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm));
-
- StoreBifResult(2, make_binary(pb));
- }
-
- OpCase(i_bs_init_heap_bin_heap_IIId): {
- BsOp1 = Arg(0);
- BsOp2 = Arg(1);
- I++;
- goto do_heap_bin_alloc;
- }
-
- OpCase(i_bs_init_heap_bin_IId): {
- BsOp1 = Arg(0);
- BsOp2 = 0;
- }
- /* Fall through */
- do_heap_bin_alloc:
- {
- ErlHeapBin* hb;
- Uint bin_need;
-
- bin_need = heap_bin_size(BsOp1);
- erts_bin_offset = 0;
- erts_writable_bin = 0;
- TestHeap(bin_need+BsOp2+ERL_SUB_BIN_SIZE, Arg(1));
- hb = (ErlHeapBin *) HTOP;
- HTOP += bin_need;
- hb->thing_word = header_heap_bin(BsOp1);
- hb->size = BsOp1;
- erts_current_bin = (byte *) hb->data;
- BsOp1 = make_binary(hb);
- StoreBifResult(2, BsOp1);
- }
- }
-
- OpCase(bs_add_jssId): {
- Eterm Op1, Op2;
- Uint Unit = Arg(3);
-
- GetArg2(1, Op1, Op2);
- if (is_both_small(Op1, Op2)) {
- Sint Arg1 = signed_val(Op1);
- Sint Arg2 = signed_val(Op2);
-
- if (Arg1 >= 0 && Arg2 >= 0) {
- BsSafeMul(Arg2, Unit, goto system_limit, Op1);
- Op1 += Arg1;
-
- store_bs_add_result:
- if (Op1 <= MAX_SMALL) {
- Op1 = make_small(Op1);
- } else {
- /*
- * May generate a heap fragment, but in this
- * particular case it is OK, since the value will be
- * stored into an x register (the GC will scan x
- * registers for references to heap fragments) and
- * there is no risk that the value can be stored into a
- * location that is not scanned for heap-fragment
- * references (such as the heap).
- */
- SWAPOUT;
- Op1 = erts_make_integer(Op1, c_p);
- HTOP = HEAP_TOP(c_p);
- }
- StoreBifResult(4, Op1);
- }
- goto badarg;
- } else {
- Uint a;
- Uint b;
- Uint c;
-
- /*
- * Now we know that one of the arguments is
- * not a small. We must convert both arguments
- * to Uints and check for errors at the same time.
- *
- * Error checking is tricky.
- *
- * If one of the arguments is not numeric or
- * not positive, the error reason is BADARG.
- *
- * Otherwise if both arguments are numeric,
- * but at least one argument does not fit in
- * a Uint, the reason is SYSTEM_LIMIT.
- */
-
- if (!term_to_Uint(Op1, &a)) {
- if (a == BADARG) {
- goto badarg;
- }
- if (!term_to_Uint(Op2, &b)) {
- c_p->freason = b;
- goto lb_Cl_error;
- }
- goto system_limit;
- } else if (!term_to_Uint(Op2, &b)) {
- c_p->freason = b;
- goto lb_Cl_error;
- }
-
- /*
- * The arguments are now correct and stored in a and b.
- */
-
- BsSafeMul(b, Unit, goto system_limit, c);
- Op1 = a + c;
- if (Op1 < a) {
- /*
- * If the result is less than one of the
- * arguments, there must have been an overflow.
- */
- goto system_limit;
- }
- goto store_bs_add_result;
- }
- /* No fallthrough */
- ASSERT(0);
- }
-
- OpCase(bs_put_string_II):
- {
- BeamInstr *next;
- PreFetch(2, next);
- erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
- NextPF(2, next);
- }
-
- /*
- * x(SCRATCH_X_REG);
- * Operands: Fail ExtraHeap Live Unit Size Dst
- */
-
- OpCase(i_bs_append_jIIIsd): {
- Uint live = Arg(2);
- Uint res;
- Eterm Size;
-
- GetArg1(4, Size);
- HEAVY_SWAPOUT;
- reg[live] = x(SCRATCH_X_REG);
- res = erts_bs_append(c_p, reg, live, Size, Arg(1), Arg(3));
- HEAVY_SWAPIN;
- if (is_non_value(res)) {
- /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
- goto lb_Cl_error;
- }
- StoreBifResult(5, res);
- }
-
- /*
- * Operands: Fail Size Src Unit Dst
- */
- OpCase(i_bs_private_append_jIssd): {
- Eterm res;
- Eterm Size, Src;
-
- GetArg2(2, Size, Src);
- res = erts_bs_private_append(c_p, Src, Size, Arg(1));
- if (is_non_value(res)) {
- /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
- goto lb_Cl_error;
- }
- StoreBifResult(4, res);
- }
-
- OpCase(bs_init_writable): {
- HEAVY_SWAPOUT;
- r(0) = erts_bs_init_writable(c_p, r(0));
- HEAVY_SWAPIN;
- Next(0);
- }
-
- /*
- * Calculate the number of bytes needed to encode the source
- * operand to UTF-8. If the source operand is invalid (e.g. wrong
- * type or range) we return a nonsense integer result (0 or 4). We
- * can get away with that because we KNOW that bs_put_utf8 will do
- * full error checking.
- */
- OpCase(i_bs_utf8_size_sd): {
- Eterm arg;
- Eterm result;
-
- GetArg1(0, arg);
- if (arg < make_small(0x80UL)) {
- result = make_small(1);
- } else if (arg < make_small(0x800UL)) {
- result = make_small(2);
- } else if (arg < make_small(0x10000UL)) {
- result = make_small(3);
- } else {
- result = make_small(4);
- }
- StoreBifResult(1, result);
- }
-
- OpCase(i_bs_put_utf8_js): {
- Eterm arg;
-
- GetArg1(1, arg);
- if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) {
- goto badarg;
- }
- Next(2);
- }
-
- /*
- * Calculate the number of bytes needed to encode the source
- * operand to UTF-16. If the source operand is invalid (e.g. wrong
- * type or range) we return a nonsense integer result (2 or 4). We
- * can get away with that because we KNOW that bs_put_utf16 will do
- * full error checking.
- */
-
- OpCase(i_bs_utf16_size_sd): {
- Eterm arg;
- Eterm result = make_small(2);
-
- GetArg1(0, arg);
- if (arg >= make_small(0x10000UL)) {
- result = make_small(4);
- }
- StoreBifResult(1, result);
- }
-
- OpCase(bs_put_utf16_jIs): {
- Eterm arg;
-
- GetArg1(2, arg);
- if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) {
- goto badarg;
- }
- Next(3);
- }
-
- /*
- * Only used for validating a value about to be stored in a binary.
- */
- OpCase(i_bs_validate_unicode_js): {
- Eterm val;
-
- GetArg1(1, val);
-
- /*
- * There is no need to untag the integer, but it IS necessary
- * to make sure it is small (if the term is a bignum, it could
- * slip through the test, and there is no further test that
- * would catch it, since bit syntax construction silently masks
- * too big numbers).
- */
- if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
- goto badarg;
- }
- Next(2);
- }
-
- /*
- * Only used for validating a value matched out.
- */
- OpCase(i_bs_validate_unicode_retract_jss): {
- Eterm i; /* Integer to validate */
-
- /*
- * There is no need to untag the integer, but it IS necessary
- * to make sure it is small (a bignum pointer could fall in
- * the valid range).
- */
-
- GetArg1(1, i);
- if (is_not_small(i) || i > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) {
- Eterm ms; /* Match context */
- ErlBinMatchBuffer* mb;
-
- GetArg1(2, ms);
- mb = ms_matchbuffer(ms);
- mb->offset -= 32;
- goto badarg;
- }
- Next(3);
- }
-
- /*
- * Matching of binaries.
- */
-
- {
- Eterm header;
- BeamInstr *next;
- Uint slots;
- Eterm context;
-
- do_start_match:
- slots = Arg(2);
- if (!is_boxed(context)) {
- ClauseFail();
- }
- PreFetch(4, next);
- header = *boxed_val(context);
- if (header_is_bin_matchstate(header)) {
- ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
- Uint actual_slots = HEADER_NUM_SLOTS(header);
- ms->save_offset[0] = ms->mb.offset;
- if (actual_slots < slots) {
- ErlBinMatchState* dst;
- Uint live = Arg(1);
- Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
-
- TestHeapPreserve(wordsneeded, live, context);
- ms = (ErlBinMatchState *) boxed_val(context);
- dst = (ErlBinMatchState *) HTOP;
- *dst = *ms;
- *HTOP = HEADER_BIN_MATCHSTATE(slots);
- HTOP += wordsneeded;
- HEAP_SPACE_VERIFIED(0);
- StoreResult(make_matchstate(dst), Arg(3));
- }
- } else if (is_binary_header(header)) {
- Eterm result;
- Uint live = Arg(1);
- Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
- TestHeapPreserve(wordsneeded, live, context);
- HEAP_TOP(c_p) = HTOP;
-#ifdef DEBUG
- c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
-#endif
- result = erts_bs_start_match_2(c_p, context, slots);
- HTOP = HEAP_TOP(c_p);
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- } else {
- StoreResult(result, Arg(3));
- }
- } else {
- ClauseFail();
- }
- NextPF(4, next);
-
- OpCase(i_bs_start_match2_xfIId): {
- context = xb(Arg(0));
- I++;
- goto do_start_match;
- }
- OpCase(i_bs_start_match2_yfIId): {
- context = yb(Arg(0));
- I++;
- goto do_start_match;
- }
- }
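/* An assumed layout for the match state handled above, mirroring how the
 * code reads HEADER_NUM_SLOTS() and save_offset[]; the authoritative
 * definition is in erl_bits.h. */
typedef struct {
    Eterm thing_word;        /* HEADER_BIN_MATCHSTATE(slots) */
    ErlBinMatchBuffer mb;    /* base, offset and size of the binary */
    Eterm save_offset[1];    /* one slot per save/restore index */
} ErlBinMatchState_sketch;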
-
- OpCase(bs_test_zero_tail2_fx): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
-
- PreFetch(2, next);
- _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1)));
- if (_mb->size != _mb->offset) {
- ClauseFail();
- }
- NextPF(2, next);
- }
-
- OpCase(bs_test_tail_imm2_fxI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(3, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if (_mb->size - _mb->offset != Arg(2)) {
- ClauseFail();
- }
- NextPF(3, next);
- }
-
- OpCase(bs_test_unit_fxI): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(3, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if ((_mb->size - _mb->offset) % Arg(2)) {
- ClauseFail();
- }
- NextPF(3, next);
- }
-
- OpCase(bs_test_unit8_fx): {
- BeamInstr *next;
- ErlBinMatchBuffer *_mb;
- PreFetch(2, next);
- _mb = ms_matchbuffer(xb(Arg(1)));
- if ((_mb->size - _mb->offset) & 7) {
- ClauseFail();
- }
- NextPF(2, next);
- }
-
- {
- Eterm bs_get_integer8_context;
-
- OpCase(i_bs_get_integer_8_xfd): {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- bs_get_integer8_context = xb(Arg(0));
- I++;
- _mb = ms_matchbuffer(bs_get_integer8_context);
- if (_mb->size - _mb->offset < 8) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
- } else {
- _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
- _mb->offset += 8;
- }
- StoreBifResult(1, _result);
- }
- }
-
- {
- Eterm bs_get_integer_16_context;
-
- OpCase(i_bs_get_integer_16_xfd):
- bs_get_integer_16_context = xb(Arg(0));
- I++;
-
- {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- _mb = ms_matchbuffer(bs_get_integer_16_context);
- if (_mb->size - _mb->offset < 16) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
- } else {
- _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
- _mb->offset += 16;
- }
- StoreBifResult(1, _result);
- }
- }
-
- {
- Eterm bs_get_integer_32_context;
-
- OpCase(i_bs_get_integer_32_xfId):
- bs_get_integer_32_context = xb(Arg(0));
- I++;
-
- {
- ErlBinMatchBuffer *_mb;
- Uint32 _integer;
- Eterm _result;
- _mb = ms_matchbuffer(bs_get_integer_32_context);
- if (_mb->size - _mb->offset < 32) { ClauseFail(); }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _integer = erts_bs_get_unaligned_uint32(_mb);
- } else {
- _integer = get_int32(_mb->base + _mb->offset/8);
- }
- _mb->offset += 32;
-#if !defined(ARCH_64)
- if (IS_USMALL(0, _integer)) {
-#endif
- _result = make_small(_integer);
-#if !defined(ARCH_64)
- } else {
- TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
- _result = uint_to_big((Uint) _integer, HTOP);
- HTOP += BIG_UINT_HEAP_SIZE;
- HEAP_SPACE_VERIFIED(0);
- }
-#endif
- StoreBifResult(2, _result);
- }
- }
-
- {
- Eterm Ms, Sz;
-
- /* Operands: x(Reg) Size Live Fail Flags Dst */
- OpCase(i_bs_get_integer_imm_xIIfId): {
- Uint wordsneeded;
- Ms = xb(Arg(0));
- Sz = Arg(1);
- wordsneeded = 1+WSIZE(NBYTES(Sz));
- TestHeapPreserve(wordsneeded, Arg(2), Ms);
- I += 3;
- /* Operands: Fail Flags Dst */
- goto do_bs_get_integer_imm;
- }
-
- /* Operands: x(Reg) Size Fail Flags Dst */
- OpCase(i_bs_get_integer_small_imm_xIfId): {
- Ms = xb(Arg(0));
- Sz = Arg(1);
- I += 2;
- /* Operands: Fail Flags Dst */
- goto do_bs_get_integer_imm;
- }
-
- /*
- * Ms = match context
- * Sz = size of field
- * Operands: Fail Flags Dst
- */
- do_bs_get_integer_imm: {
- ErlBinMatchBuffer* mb;
- Eterm result;
-
- mb = ms_matchbuffer(Ms);
- LIGHT_SWAPOUT;
- result = erts_bs_get_integer_2(c_p, Sz, Arg(1), mb);
- LIGHT_SWAPIN;
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(2, result);
- }
- }
-
- /*
- * Operands: Fail Live FlagsAndUnit Ms Sz Dst
- */
- OpCase(i_bs_get_integer_fIIssd): {
- Uint flags;
- Uint size;
- Eterm Ms;
- Eterm Sz;
- ErlBinMatchBuffer* mb;
- Eterm result;
-
- flags = Arg(2);
- GetArg2(3, Ms, Sz);
- BsGetFieldSize(Sz, (flags >> 3), ClauseFail(), size);
- if (size >= SMALL_BITS) {
- Uint wordsneeded;
- /* Check bits size before potential gc.
- * We do not want a gc and then realize we don't need
- * the allocated space (i.e. if the op fails).
- *
- * Remember to re-acquire the matchbuffer after gc.
- */
-
- mb = ms_matchbuffer(Ms);
- if (mb->size - mb->offset < size) {
- ClauseFail();
- }
- wordsneeded = 1+WSIZE(NBYTES((Uint) size));
- TestHeapPreserve(wordsneeded, Arg(1), Ms);
- }
- mb = ms_matchbuffer(Ms);
- LIGHT_SWAPOUT;
- result = erts_bs_get_integer_2(c_p, size, flags, mb);
- LIGHT_SWAPIN;
- HEAP_SPACE_VERIFIED(0);
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(5, result);
- }
-
- {
- Eterm get_utf8_context;
-
- /* Operands: MatchContext Fail Dst */
- OpCase(i_bs_get_utf8_xfd): {
- get_utf8_context = xb(Arg(0));
- I++;
- }
-
- /*
- * get_utf8_context = match_context
- * Operands: Fail Dst
- */
-
- {
- Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context));
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(1, result);
- }
- }
-
- {
- Eterm get_utf16_context;
-
- /* Operands: MatchContext Fail Flags Dst */
- OpCase(i_bs_get_utf16_xfId): {
- get_utf16_context = xb(Arg(0));
- I++;
- }
-
- /*
- * get_utf16_context = match_context
- * Operands: Fail Flags Dst
- */
- {
- Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context),
- Arg(1));
- if (is_non_value(result)) {
- ClauseFail();
- }
- StoreBifResult(2, result);
- }
- }
-
- {
- Eterm context_to_binary_context;
- ErlBinMatchBuffer* mb;
- ErlSubBin* sb;
- Uint size;
- Uint offs;
- Uint orig;
- Uint hole_size;
-
- OpCase(bs_context_to_binary_x):
- context_to_binary_context = xb(Arg(0));
- I--;
-
- if (is_boxed(context_to_binary_context) &&
- header_is_bin_matchstate(*boxed_val(context_to_binary_context))) {
- ErlBinMatchState* ms;
- ms = (ErlBinMatchState *) boxed_val(context_to_binary_context);
- mb = &ms->mb;
- offs = ms->save_offset[0];
- size = mb->size - offs;
- goto do_bs_get_binary_all_reuse_common;
- }
- Next(2);
-
- OpCase(i_bs_get_binary_all_reuse_xfI): {
- context_to_binary_context = xb(Arg(0));
- I++;
- }
-
- mb = ms_matchbuffer(context_to_binary_context);
- size = mb->size - mb->offset;
- if (size % Arg(1) != 0) {
- ClauseFail();
- }
- offs = mb->offset;
-
- do_bs_get_binary_all_reuse_common:
- orig = mb->orig;
- sb = (ErlSubBin *) boxed_val(context_to_binary_context);
- hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
- sb->thing_word = HEADER_SUB_BIN;
- sb->size = BYTE_OFFSET(size);
- sb->bitsize = BIT_OFFSET(size);
- sb->offs = BYTE_OFFSET(offs);
- sb->bitoffs = BIT_OFFSET(offs);
- sb->is_writable = 0;
- sb->orig = orig;
- if (hole_size) {
- sb[1].thing_word = make_pos_bignum_header(hole_size-1);
- }
- Next(2);
- }
-
- {
- Eterm match_string_context;
-
- OpCase(i_bs_match_string_xfII): {
- match_string_context = xb(Arg(0));
- I++;
- }
-
- {
- BeamInstr *next;
- byte* bytes;
- Uint bits;
- ErlBinMatchBuffer* mb;
- Uint offs;
-
- PreFetch(3, next);
- bits = Arg(1);
- bytes = (byte *) Arg(2);
- mb = ms_matchbuffer(match_string_context);
- if (mb->size - mb->offset < bits) {
- ClauseFail();
- }
- offs = mb->offset & 7;
- if (offs == 0 && (bits & 7) == 0) {
- if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
- ClauseFail();
- }
- } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
- ClauseFail();
- }
- mb->offset += bits;
- NextPF(3, next);
- }
- }
-
- OpCase(i_bs_save2_xI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(2, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
- _ms->save_offset[Arg(1)] = _ms->mb.offset;
- NextPF(2, next);
- }
-
- OpCase(i_bs_restore2_xI): {
- BeamInstr *next;
- ErlBinMatchState *_ms;
- PreFetch(2, next);
- _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
- _ms->mb.offset = _ms->save_offset[Arg(1)];
- NextPF(2, next);
- }
-
#include "beam_cold.h"
-
- /*
- * This instruction is probably never used (because it is combined with
- * a return). However, a future compiler might for some reason emit a
- * deallocate not followed by a return, and that should work.
- */
- OpCase(deallocate_I): {
- BeamInstr *next;
-
- PreFetch(1, next);
- D(Arg(0));
- NextPF(1, next);
- }
-
- /*
- * Trace and debugging support.
- */
-
- OpCase(return_trace): {
- BeamInstr* code = (BeamInstr *) (UWord) E[0];
-
- SWAPOUT; /* Needed for shared heap */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN;
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[2]));
- E += 3;
- Goto(*I);
- }
-
- OpCase(i_generic_breakpoint): {
- BeamInstr real_I;
- ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- HEAVY_SWAPOUT;
- real_I = erts_generic_breakpoint(c_p, I, reg);
- HEAVY_SWAPIN;
- ASSERT(VALID_INSTR(real_I));
- Goto(real_I);
- }
-
- OpCase(i_return_time_trace): {
- BeamInstr *pc = (BeamInstr *) (UWord) E[0];
- SWAPOUT;
- erts_trace_time_return(c_p, pc);
- SWAPIN;
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[1]));
- E += 2;
- Goto(*I);
- }
-
- OpCase(i_return_to_trace): {
- if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
- Uint *cpp = (Uint*) E;
- for(;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- do ++cpp; while(is_not_CP(*cpp));
- cpp += 2;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- do ++cpp; while(is_not_CP(*cpp));
- } else break;
- }
- SWAPOUT; /* Needed for shared heap */
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return_to(c_p, cp_val(*cpp));
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN;
- }
- c_p->cp = NULL;
- SET_I((BeamInstr *) cp_val(E[0]));
- E += 1;
- Goto(*I);
- }
-
- /*
- * New floating point instructions.
- */
-
- OpCase(fmove_ql): {
- Eterm fr = Arg(1);
- BeamInstr *next;
-
- PreFetch(2, next);
- GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- NextPF(2, next);
- }
-
- OpCase(fmove_dl): {
- Eterm targ1;
- Eterm fr = Arg(1);
- BeamInstr *next;
-
- PreFetch(2, next);
- targ1 = REG_TARGET(Arg(0));
- /* Arg(0) == HEADER_FLONUM */
- GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- NextPF(2, next);
- }
-
- OpCase(fmove_ld): {
- Eterm fr = Arg(0);
- Eterm dest = make_float(HTOP);
-
- PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP);
- HTOP += FLOAT_SIZE_OBJECT;
- StoreBifResult(1, dest);
- }
-
- OpCase(fconv_dl): {
- Eterm targ1;
- Eterm fr = Arg(1);
- BeamInstr *next;
-
- targ1 = REG_TARGET(Arg(0));
- PreFetch(2, next);
- if (is_small(targ1)) {
- fb(fr) = (double) signed_val(targ1);
- } else if (is_big(targ1)) {
- if (big_to_double(targ1, &fb(fr)) < 0) {
- goto fbadarith;
- }
- } else if (is_float(targ1)) {
- GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
- } else {
- goto fbadarith;
- }
- NextPF(2, next);
- }
-
-#ifdef NO_FPE_SIGNALS
- OpCase(fclearerror):
- OpCase(i_fcheckerror):
- erts_exit(ERTS_ERROR_EXIT, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
-# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
-# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
-#else
-# define ERTS_NO_FPE_CHECK_INIT(p)
-# define ERTS_NO_FPE_ERROR(p, a, b)
-
- OpCase(fclearerror): {
- BeamInstr *next;
-
- PreFetch(0, next);
- ERTS_FP_CHECK_INIT(c_p);
- NextPF(0, next);
- }
-
- OpCase(i_fcheckerror): {
- BeamInstr *next;
-
- PreFetch(0, next);
- ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
- NextPF(0, next);
- }
-#endif
-
-
- OpCase(i_fadd_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fsub_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fmul_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fdiv_lll): {
- BeamInstr *next;
-
- PreFetch(3, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
- NextPF(3, next);
- }
- OpCase(i_fnegate_ll): {
- BeamInstr *next;
-
- PreFetch(2, next);
- ERTS_NO_FPE_CHECK_INIT(c_p);
- fb(Arg(1)) = -fb(Arg(0));
- ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith);
- NextPF(2, next);
-
- fbadarith:
- c_p->freason = BADARITH;
- goto find_func_info;
- }
-
-#ifdef HIPE
- {
-#define HIPE_MODE_SWITCH(Cmd) \
- SWAPOUT; \
- ERTS_DBG_CHK_REDS(c_p, FCALLS); \
- c_p->fcalls = FCALLS; \
- c_p->def_arg_reg[4] = -neg_o_reds; \
- c_p = hipe_mode_switch(c_p, Cmd, reg); \
- goto L_post_hipe_mode_switch
-
- OpCase(hipe_trap_call): {
- /*
- * I[-5]: &&lb_i_func_info_IaaI
- * I[-4]: Native code callee (inserted by HiPE)
- * I[-3]: Module (tagged atom)
- * I[-2]: Function (tagged atom)
- * I[-1]: Arity (untagged integer)
- * I[ 0]: &&lb_hipe_trap_call
- * ... remainder of original BEAM code
- */
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
- ++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8));
- }
- OpCase(hipe_trap_call_closure): {
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) I[-4];
- ++hipe_trap_count;
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8));
- }
- OpCase(hipe_trap_return): {
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN);
- }
- OpCase(hipe_trap_throw): {
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_THROW);
- }
- OpCase(hipe_trap_resume): {
- HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RESUME);
- }
-#undef HIPE_MODE_SWITCH
-
- L_post_hipe_mode_switch:
-#ifdef DEBUG
- pid = c_p->common.id; /* may have switched process... */
-#endif
- reg = erts_proc_sched_data(c_p)->x_reg_array;
- freg = erts_proc_sched_data(c_p)->f_reg_array;
- ERL_BITS_RELOAD_STATEP(c_p);
- /* XXX: this abuse of def_arg_reg[] is horrid! */
- neg_o_reds = -c_p->def_arg_reg[4];
- FCALLS = c_p->fcalls;
- SWAPIN;
- ERTS_DBG_CHK_REDS(c_p, FCALLS);
- switch( c_p->def_arg_reg[3] ) {
- case HIPE_MODE_SWITCH_RES_RETURN:
- ASSERT(is_value(reg[0]));
- SET_I(c_p->cp);
- c_p->cp = 0;
- Goto(*I);
- case HIPE_MODE_SWITCH_RES_CALL_EXPORTED:
- c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()];
- /*fall through*/
- case HIPE_MODE_SWITCH_RES_CALL_BEAM:
- SET_I(c_p->i);
- Dispatch();
- case HIPE_MODE_SWITCH_RES_CALL_CLOSURE:
- /* This can be used to call any function value, but currently it's
- only used to call closures referring to unloaded modules. */
- {
- BeamInstr *next;
-
- next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
- HEAVY_SWAPIN;
- if (next != NULL) {
- SET_I(next);
- Dispatchfun();
- }
- goto find_func_info;
- }
- case HIPE_MODE_SWITCH_RES_THROW:
- c_p->cp = NULL;
- I = handle_error(c_p, I, reg, NULL);
- goto post_error_handling;
- default:
- erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]);
- }
- }
- OpCase(hipe_call_count): {
- /*
- * I[-5]: &&lb_i_func_info_IaaI
- * I[-4]: pointer to struct hipe_call_count (inserted by HiPE)
- * I[-3]: Module (tagged atom)
- * I[-2]: Function (tagged atom)
- * I[-1]: Arity (untagged integer)
- * I[ 0]: &&lb_hipe_call_count
- * ... remainder of original BEAM code
- */
- struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
- ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
- ASSERT(hcc != NULL);
- ASSERT(VALID_INSTR(hcc->opcode));
- ++(hcc->count);
- Goto(hcc->opcode);
- }
-#endif /* HIPE */
-
- OpCase(i_yield):
- {
- /* This is safe as long as REDS_IN(c_p) is never stored
- * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5],
- * which may be c_p->arg_reg[5], which is close, but no banana.
- */
- c_p->arg_reg[0] = am_true;
- c_p->arity = 1; /* One living register (the 'true' return value) */
- SWAPOUT;
- c_p->i = I + 1; /* Next instruction */
- c_p->current = NULL;
- goto do_schedule;
- }
-
- OpCase(i_hibernate): {
- HEAVY_SWAPOUT;
- if (erts_hibernate(c_p, r(0), x(1), x(2), reg)) {
- FCALLS = c_p->fcalls;
- c_p->flags &= ~F_HIBERNATE_SCHED;
- goto do_schedule;
- } else {
- HEAVY_SWAPIN;
- I = handle_error(c_p, I, reg, hibernate_3);
- goto post_error_handling;
- }
- }
-
- /* This is optimised as an instruction because
- it has to be very, very fast */
- OpCase(i_perf_counter): {
- BeamInstr* next;
- ErtsSysPerfCounter ts;
- PreFetch(0, next);
-
- ts = erts_sys_perf_counter();
-
- if (IS_SSMALL(ts)) {
- r(0) = make_small((Sint)ts);
- } else {
- TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0);
- r(0) = make_big(HTOP);
-#if defined(ARCH_32) || HALFWORD_HEAP
- if (ts >= (((Uint64) 1) << 32)) {
- *HTOP = make_pos_bignum_header(2);
- BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
- BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff));
- HTOP += 3;
- }
- else
-#endif
- {
- *HTOP = make_pos_bignum_header(1);
- BIG_DIGIT(HTOP, 0) = (Uint) ts;
- HTOP += 2;
- }
- }
- NextPF(0, next);
- }
-
- OpCase(i_debug_breakpoint): {
- HEAVY_SWAPOUT;
- I = call_error_handler(c_p, I-3, reg, am_breakpoint);
- HEAVY_SWAPIN;
- if (I) {
- Goto(*I);
- }
- goto handle_error;
- }
-
-
- OpCase(system_limit_j):
- system_limit:
- c_p->freason = SYSTEM_LIMIT;
- goto lb_Cl_error;
-
-
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
DEFINE_COUNTING_LABELS;
#endif
@@ -5102,9 +968,6 @@ do { \
init_emulator:
{
- int i;
- Export* ep;
-
#ifndef NO_JUMP_TABLE
#ifdef ERTS_OPCODE_COUNTER_SUPPORT
#ifdef DEBUG
@@ -5116,34 +979,8 @@ do { \
beam_ops = opcodes;
#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
#endif /* NO_JUMP_TABLE */
-
- em_call_error_handler = OpCode(call_error_handler);
- em_apply_bif = OpCode(apply_bif);
- em_call_nif = OpCode(call_nif);
-
- beam_apply[0] = (BeamInstr) OpCode(i_apply);
- beam_apply[1] = (BeamInstr) OpCode(normal_exit);
- beam_exit[0] = (BeamInstr) OpCode(error_action_code);
- beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
- beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
- beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
- beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
- beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
-
- /*
- * Enter all BIFs into the export table.
- */
- for (i = 0; i < BIF_SIZE; i++) {
- ep = erts_export_put(bif_table[i].module,
- bif_table[i].name,
- bif_table[i].arity);
- bif_export[i] = ep;
- ep->code[3] = (BeamInstr) OpCode(apply_bif);
- ep->code[4] = (BeamInstr) bif_table[i].f;
- /* XXX: set func info for bifs */
- ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- }
+ init_emulator_finish();
return;
}
#ifdef NO_JUMP_TABLE
@@ -5155,26 +992,71 @@ do { \
save_calls1:
{
- Eterm* dis_next;
+ BeamInstr dis_next;
save_calls(c_p, (Export *) Arg(0));
SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
- dis_next = (Eterm *) *I;
+ dis_next = *I;
FCALLS--;
Goto(dis_next);
}
}
/*
+ * One-time initialization of emulator. Does not need to be
+ * in process_main().
+ */
+static void
+init_emulator_finish(void)
+{
+ int i;
+ Export* ep;
+
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ for (i = 0; i < NUMBER_OF_OPCODES; i++) {
+ BeamInstr instr = BeamOpCodeAddr(i);
+ if (instr >= (1ull << 32)) {
+ erts_exit(ERTS_ERROR_EXIT,
+ "This run-time was supposed to be compiled with all code below 2Gb,\n"
+ "but the instruction '%s' is located at %016lx.\n",
+ opc[i].name, instr);
+ }
+ }
+#endif
+
+ beam_apply[0] = BeamOpCodeAddr(op_i_apply);
+ beam_apply[1] = BeamOpCodeAddr(op_normal_exit);
+ beam_exit[0] = BeamOpCodeAddr(op_error_action_code);
+ beam_continue_exit[0] = BeamOpCodeAddr(op_continue_exit);
+ beam_return_to_trace[0] = BeamOpCodeAddr(op_i_return_to_trace);
+ beam_return_trace[0] = BeamOpCodeAddr(op_return_trace);
+ beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */
+ beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace);
+
+ /*
+ * Enter all BIFs into the export table.
+ */
+ for (i = 0; i < BIF_SIZE; i++) {
+ ep = erts_export_put(bif_table[i].module,
+ bif_table[i].name,
+ bif_table[i].arity);
+ bif_export[i] = ep;
+ ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
+ /* XXX: set func info for bifs */
+ ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
+ }
+}
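/* A hedged sketch of the opcode helpers used here, for the threaded-code
 * build where beam_ops[] holds computed-goto label addresses; the real
 * macros also cover the switch()-based build. */
#define BeamOpCodeAddr_sketch(Op)       ((BeamInstr) beam_ops[(Op)])
#define BeamIsOpCode_sketch(Instr, Op)  ((Instr) == BeamOpCodeAddr_sketch(Op))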
+
+/*
* erts_dirty_process_main() is what dirty schedulers execute. Since they handle
* only NIF calls they do not need to be able to execute all BEAM
* instructions.
*/
void erts_dirty_process_main(ErtsSchedulerData *esdp)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
Process* c_p = NULL;
ErtsMonotonicTime start_time;
#ifdef DEBUG
@@ -5221,8 +1103,8 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
goto do_dirty_schedule;
context_switch:
- c_p->arity = I[-1];
- c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ c_p->current = erts_code_to_codemfa(I); /* Pointer to Mod, Func, Arity */
+ c_p->arity = c_p->current->arity;
{
int reds_used;
@@ -5285,8 +1167,11 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
}
+ if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
+ erts_proc_sig_handle_pending_suspend(c_p);
+
PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
c_p = erts_schedule(esdp, c_p, reds_used);
@@ -5300,16 +1185,31 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
#ifdef DEBUG
pid = c_p->common.id; /* Save for debugging purposes */
#endif
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_MSACC_UPDATE_CACHE_X();
- reg = esdp->x_reg_array;
- {
+ /*
+ * Set fcalls even though we ignore it, so we don't
+ * confuse code accessing it...
+ */
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ c_p->fcalls = 0;
+ else
+ c_p->fcalls = CONTEXT_REDS;
+
+ if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) {
+ erts_execute_dirty_system_task(c_p);
+ goto do_dirty_schedule;
+ }
+ else {
+ ErtsCodeMFA *codemfa;
Eterm* argp;
- int i;
+ int i, exiting;
+
+ reg = esdp->x_reg_array;
argp = c_p->arg_reg;
for (i = c_p->arity - 1; i >= 0; i--) {
@@ -5325,17 +1225,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
I = c_p->i;
- ASSERT(BeamOp(op_call_nif) == (BeamInstr *) *I);
-
- /*
- * Set fcalls even though we ignore it, so we don't
- * confuse code accessing it...
- */
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
- c_p->fcalls = 0;
- else
- c_p->fcalls = CONTEXT_REDS;
-
SWAPIN;
#ifdef USE_VM_PROBES
@@ -5345,13 +1234,11 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
dtrace_proc_str(c_p, process_buf);
if (ERTS_PROC_IS_EXITING(c_p)) {
- strcpy(fun_buf, "<exiting>");
+ sys_strcpy(fun_buf, "<exiting>");
} else {
- BeamInstr *fptr = find_function_from_pc(c_p->i);
- if (fptr) {
- dtrace_fun_decode(c_p, (Eterm)fptr[0],
- (Eterm)fptr[1], (Uint)fptr[2],
- NULL, fun_buf);
+ ErtsCodeMFA *cmfa = find_function_from_pc(c_p->i);
+ if (cmfa) {
+ dtrace_fun_decode(c_p, cmfa, NULL, fun_buf);
} else {
erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
"<unknown/%p>", *I);
@@ -5361,107 +1248,80 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
DTRACE2(process_scheduled, process_buf, fun_buf);
}
#endif
- }
-
- {
-#ifdef DEBUG
- Eterm result;
-#endif
- Eterm arity;
-
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- * I[3]: Function pointer to dirty NIF
- */
- BifFunction vbf;
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
- DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- arity = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
- ASSERT(!c_p->scheduler_data);
+ codemfa = erts_code_to_codemfa(I);
- erts_pre_dirty_nif(esdp, &env, c_p,
- (struct erl_module_nif*)I[2]);
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+ c_p->current = codemfa;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
-#ifdef DEBUG
- result =
-#else
- (void)
-#endif
- (*fp)(&env, arity, reg);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if (BeamIsOpCode(*I, op_apply_bif)) {
+ exiting = erts_call_dirty_bif(esdp, c_p, I, reg);
+ }
+ else {
+ ASSERT(BeamIsOpCode(*I, op_call_nif));
+ exiting = erts_call_dirty_nif(esdp, c_p, I, reg);
+ }
- erts_post_dirty_nif(&env);
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
- ASSERT(!is_value(result));
- ASSERT(c_p->freason == TRAP);
- ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (exiting)
+ goto do_dirty_schedule;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (env.exiting)
- goto do_dirty_schedule;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- }
+ DTRACE_NIF_RETURN(c_p, codemfa);
+ ERTS_HOLE_CHECK(c_p);
+ SWAPIN;
+ I = c_p->i;
+ goto context_switch;
+ }
+}
- DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
- ERTS_HOLE_CHECK(c_p);
- SWAPIN;
- I = c_p->i;
- goto context_switch;
- }
+static ErtsCodeMFA *
+gcbif2mfa(void* gcf)
+{
+ int i;
+ for (i = 0; erts_gc_bifs[i].bif; i++) {
+ if (erts_gc_bifs[i].gc_bif == gcf)
+ return &bif_export[erts_gc_bifs[i].exp_ix]->info.mfa;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+ erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ return NULL;
}
-static BifFunction
-translate_gc_bif(void* gcf)
+static ErtsCodeMFA *
+ubif2mfa(void* uf)
{
- if (gcf == erts_gc_length_1) {
- return length_1;
- } else if (gcf == erts_gc_size_1) {
- return size_1;
- } else if (gcf == erts_gc_bit_size_1) {
- return bit_size_1;
- } else if (gcf == erts_gc_byte_size_1) {
- return byte_size_1;
- } else if (gcf == erts_gc_map_size_1) {
- return map_size_1;
- } else if (gcf == erts_gc_abs_1) {
- return abs_1;
- } else if (gcf == erts_gc_float_1) {
- return float_1;
- } else if (gcf == erts_gc_round_1) {
- return round_1;
- } else if (gcf == erts_gc_trunc_1) {
- return trunc_1;
- } else if (gcf == erts_gc_binary_part_2) {
- return binary_part_2;
- } else if (gcf == erts_gc_binary_part_3) {
- return binary_part_3;
- } else {
- erts_exit(ERTS_ERROR_EXIT, "bad gc bif");
+ int i;
+ for (i = 0; erts_u_bifs[i].bif; i++) {
+ if (erts_u_bifs[i].bif == uf)
+ return &bif_export[erts_u_bifs[i].exp_ix]->info.mfa;
}
+ erts_exit(ERTS_ERROR_EXIT, "bad u bif");
+ return NULL;
}
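/* An assumed shape for the erts_gc_bifs[]/erts_u_bifs[] tables walked by
 * gcbif2mfa() and ubif2mfa(): entries map a BIF's C entry points to its
 * bif_export[] index, and the tables end with a NULL bif field. */
typedef struct {
    BifFunction bif;       /* ordinary entry point */
    BifFunction gc_bif;    /* GC-capable entry point (gc table only) */
    int exp_ix;            /* index into bif_export[] */
} ErtsBifMapEntry_sketch;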
/*
@@ -5520,15 +1380,27 @@ Eterm error_atom[NUMBER_EXIT_CODES] = {
*/
static BeamInstr*
-handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
+handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa)
{
Eterm* hp;
Eterm Value = c_p->fvalue;
Eterm Args = am_true;
- c_p->i = pc; /* In case we call erts_exit(). */
ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
+ if (c_p->freason & EXF_RESTORE_NIF)
+ erts_nif_export_restore_error(c_p, &pc, reg, &bif_mfa);
+
+#ifdef DEBUG
+ if (bif_mfa) {
+ /* Verify that bif_mfa does not point into our nif export */
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(!nep || !ErtsInArea(bif_mfa, (char *)nep, sizeof(NifExport)));
+ }
+#endif
+
+ c_p->i = pc; /* In case we call erts_exit(). */
+
/*
* Check if we have an arglist for the top level call. If so, this
* is encoded in Value, so we have to dig out the real Value as well
@@ -5551,7 +1423,7 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
* more modular.
*/
if (c_p->freason & EXF_SAVETRACE) {
- save_stacktrace(c_p, pc, reg, bf, Args);
+ save_stacktrace(c_p, pc, reg, bif_mfa, Args);
}
/*
@@ -5587,13 +1459,14 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf)
reg[3] = c_p->ftrace;
if ((new_pc = next_catch(c_p, reg))) {
c_p->cp = 0; /* To avoid keeping stale references. */
+ ERTS_RECV_MARK_CLEAR(c_p); /* No longer safe to use this position */
return new_pc;
}
if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found");
}
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
terminate_proc(c_p, Value);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
return NULL;
}
@@ -5621,7 +1494,8 @@ next_catch(Process* c_p, Eterm *reg) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
@@ -5649,7 +1523,8 @@ next_catch(Process* c_p, Eterm *reg) {
if (is_catch(*ptr) && active_catches) goto found_catch;
}
if (cp_val(*prev) == beam_exception_trace) {
- erts_trace_exception(c_p, cp_val(ptr[0]),
+ ErtsCodeMFA *mfa = (ErtsCodeMFA*)cp_val(ptr[0]);
+ erts_trace_exception(c_p, mfa,
reg[1], reg[2],
ERTS_TRACER_FROM_ETERM(ptr+1));
}
@@ -5824,11 +1699,12 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
*/
static void
-save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
- Eterm args) {
+save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
+ ErtsCodeMFA *bif_mfa, Eterm args) {
struct StackTrace* s;
int sz;
int depth = erts_backtrace_depth; /* max depth (never negative) */
+
if (depth > 0) {
/* There will always be a current function */
depth --;
@@ -5844,33 +1720,30 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+ * 'exit/1' or 'throw/1', save the BIF's MFA and the argument
* registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
- int i;
- int a = 0;
- for (i = 0; i < BIF_SIZE; i++) {
- if (bf == bif_table[i].f || bf == bif_table[i].traced) {
- Export *ep = bif_export[i];
- s->current = ep->code;
- a = bif_table[i].arity;
+ if (bif_mfa) {
+ if (bif_mfa->module == am_erlang) {
+ switch (bif_mfa->function) {
+ case am_error:
+ if (bif_mfa->arity == 1 || bif_mfa->arity == 2)
+ goto non_bif_stacktrace;
+ break;
+ case am_exit:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ case am_throw:
+ if (bif_mfa->arity == 1)
+ goto non_bif_stacktrace;
+ break;
+ default:
break;
}
}
- if (i >= BIF_SIZE) {
- /*
- * The Bif does not really exist (no BIF entry). It is a
- * TRAP and traps are called through apply_bif, which also
- * sets c_p->current (luckily).
- * OR it is a NIF called by call_nif where current is also set.
- */
- ASSERT(c_p->current);
- s->current = c_p->current;
- a = s->current[2];
- }
+ s->current = bif_mfa;
/* Save first stack entry */
ASSERT(pc);
if (depth > 0) {
@@ -5883,8 +1756,11 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
depth--;
}
s->pc = NULL;
- args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ args = make_arglist(c_p, reg, bif_mfa->arity); /* Overwrite CAR(c_p->ftrace) */
} else {
+
+ non_bif_stacktrace:
+
s->current = c_p->current;
/*
* For a function_clause error, the arguments are in the beam
@@ -5894,7 +1770,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
(GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
int a;
ASSERT(s->current);
- a = s->current[2];
+ a = s->current->arity;
args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
/* Save first stack entry */
ASSERT(c_p->cp);
@@ -5945,12 +1821,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If the process's 'cp' points to a return/exception trace
+ * instruction and that 'cp' has been saved as 'pc' in the
+ * stacktrace, we need to update 'pc' in the stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise we will lose the topmost stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -6065,17 +1959,20 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_lookup_function_info(&fi, s->pc, 1);
} else if (GET_EXC_INDEX(s->freason) ==
GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
- erts_lookup_function_info(&fi, s->current, 1);
+ erts_lookup_function_info(&fi, erts_codemfa_to_code(s->current), 1);
} else {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.mfa is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->u.initial);
+ if (fi.mfa == NULL) {
+ if (depth <= 0)
+ erts_set_current_function(&fi, &c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6085,13 +1982,12 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.mfa ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
@@ -6107,15 +2003,17 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.mfa) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
}
static BeamInstr*
-call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
+call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func)
{
Eterm* hp;
Export* ep;
@@ -6124,13 +2022,14 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
Uint sz;
int i;
+ DBG_TRACE_MFA_P(mfa, "call_error_handler");
/*
* Search for the error_handler module.
*/
ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
erts_active_code_ix());
if (ep == NULL) { /* No error handler */
- p->current = fi;
+ p->current = mfa;
p->freason = EXC_UNDEF;
return 0;
}
@@ -6139,7 +2038,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
* Create a list with all arguments in the x registers.
*/
- arity = fi[2];
+ arity = mfa->arity;
sz = 2 * arity;
if (HeapWordsLeft(p) < sz) {
erts_garbage_collect(p, sz, reg, arity);
@@ -6155,8 +2054,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Set up registers for call to error_handler:<func>/3.
*/
- reg[0] = fi[0];
- reg[1] = fi[1];
+ reg[0] = mfa->module;
+ reg[1] = mfa->function;
reg[2] = args;
return ep->addressv[erts_active_code_ix()];
}
@@ -6205,12 +2104,112 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && BeamIsOpCode(ep->beam[0], op_apply_bif)
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This is in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+ * 'p->cp' would point into the function that called the
+ * current function when handling the error. We add a
+ * dummy stackframe in order to achieve this.
+ *
+ * Note that these BIFs will unconditionally cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+ * 'p->cp' contains a continuation pointer pointing
+ * into the function that called the current function.
+ * We push that continuation pointer onto the stack
+ * and set 'p->cp' to point into the current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+ * Called from an i_apply_last_P or apply_last_IP
+ * instruction.
+ *
+ * After we return, the calling instruction will read
+ * a continuation pointer from the stack, write it to
+ * 'p->cp', and then remove the topmost stackframe of
+ * size 'stack_offset'.
+ *
+ * We have sized the dummy stackframe so that it will
+ * be removed by the instruction we are currently
+ * executing, leaving intact the stackframe that
+ * normally would have been removed.
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
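
/*
 * A standalone sketch (invented values; not emulator code) of the
 * dummy-frame trick above: on a downward-growing stack, push the old
 * continuation pointer and retarget 'cp' at the current function, so
 * the error handler sees the correct topmost stackframe.
 */
#include <stdio.h>

#define STACK_WORDS 16

int main(void)
{
    unsigned long mem[STACK_WORDS];
    unsigned long *stop = &mem[STACK_WORDS]; /* stack grows downward */
    unsigned long cp = 0x1000;  /* p->cp: points into our caller */
    unsigned long I  = 0x2000;  /* the tail-apply instruction itself */

    /* The i_apply_only case from apply_bif_error_adjustment(): */
    *--stop = cp;               /* p->stop[0] = make_cp(p->cp) */
    cp = I;                     /* p->cp = I */

    printf("cp=0x%lx, pushed return address=0x%lx\n", cp, stop[0]);
    return 0;
}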
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
- Eterm tmp, this;
+ Eterm tmp;
+ Eterm module = reg[0];
+ Eterm function = reg[1];
+ Eterm args = reg[2];
/*
* Check the arguments which should be of the form apply(Module,
@@ -6231,26 +2230,45 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
-
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ while (1) {
+ Eterm m, f, a;
+
+ if (is_not_atom(module)) goto error;
+
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Unroll nested applications of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
- * the parameters to the x registers (reg[]). If the module argument
- * was an abstract module, add 1 to the function arity and put the
- * module argument in the n+1st x register as a THIS reference.
+ * the parameters to the x registers (reg[]).
*/
tmp = args;
@@ -6267,9 +2285,6 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
if (is_not_nil(tmp)) { /* Must be well-formed list */
goto error;
}
- if (this != THE_NON_VALUE) {
- reg[arity++] = this;
- }
/*
* Get the index into the export table, or failing that the export
@@ -6283,12 +2298,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
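
/*
 * A standalone sketch (invented Term model; not emulator code) of the
 * while(1) loop above: erlang:apply(erlang, apply, [M, F, A]) is
 * repeatedly collapsed until the target is no longer apply/3 itself.
 */
#include <stdio.h>
#include <string.h>

/* Minimal term model: an atom (name set) or a cons cell (name NULL). */
typedef struct Term {
    const char *atom;
    struct Term *head, *tail;
} Term;

static int is_atom(const Term *t) { return t && t->atom != NULL; }
static int is_cons(const Term *t) { return t && t->atom == NULL && t->head; }

static void demo_unroll(Term **m, Term **f, Term **args)
{
    while (is_atom(*m) && strcmp((*m)->atom, "erlang") == 0 &&
           is_atom(*f) && strcmp((*f)->atom, "apply") == 0) {
        Term *a = *args;
        /* Require a proper three-element list [M, F, A]. */
        if (!(is_cons(a) && is_cons(a->tail) && is_cons(a->tail->tail) &&
              a->tail->tail->tail == NULL))
            break;
        *m = a->head;
        *f = a->tail->head;
        *args = a->tail->tail->head;
    }
}

int main(void)
{
    Term erl = { "erlang", 0, 0 }, app = { "apply", 0, 0 };
    Term lists = { "lists", 0, 0 }, rev = { "reverse", 0, 0 };
    Term nil_args = { NULL, 0, 0 };              /* stands in for [] */
    Term c3 = { NULL, &nil_args, NULL };         /* [ [] ]            */
    Term c2 = { NULL, &rev, &c3 };               /* [reverse, []]     */
    Term c1 = { NULL, &lists, &c2 };             /* [lists, reverse, []] */

    Term *m = &erl, *f = &app, *args = &c1;
    demo_unroll(&m, &f, &args);
    printf("resolved call: %s:%s\n", m->atom, f->atom); /* lists:reverse */
    return 0;
}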
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6306,17 +2323,11 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- if (is_not_atom(module)) {
- Eterm* tp;
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- module = tp[1];
- if (is_not_atom(module)) goto error;
- ++arity;
+ if (is_not_atom(module)) goto error;
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3) {
+ return apply(p, reg, I, stack_offset);
}
/*
@@ -6332,32 +2343,19 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
int
-erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+erts_hibernate(Process* c_p, Eterm* reg)
{
int arity;
Eterm tmp;
-
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
- * In the non-SMP case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we do *not* want to clear
- * the active flag, which will make the process hang
- * in limbo forever. Get out of here and terminate
- * the process...
- */
- return -1;
- }
-#endif
+ Eterm module = reg[0];
+ Eterm function = reg[1];
+ Eterm args = reg[2];
if (is_not_atom(module) || is_not_atom(function)) {
/*
@@ -6401,11 +2399,11 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_hibernate)) {
+ ErtsCodeMFA cmfa = { module, function, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(c_p, module, function, arity,
- process_name, mfa);
- DTRACE2(process_hibernate, process_name, mfa);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_fun_decode(c_p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_hibernate, process_name, mfa_buf);
}
#endif
/*
@@ -6425,34 +2423,21 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
* If there are no waiting messages, garbage collect and
* shrink the heap.
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- if (!c_p->msg.len) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ if (!erts_proc_sig_fetch(c_p)) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
c_p->fvalue = NIL;
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_garbage_collect_hibernate(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
- * See comment in the beginning of the function...
- *
- * This second test is needed since gc might be traced.
- */
- return -1;
- }
-#else /* ERTS_SMP */
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- if (!c_p->msg.len)
-#endif
- erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ if (!erts_proc_sig_fetch(c_p))
+ erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- c_p->current = bif_export[BIF_hibernate_3]->code;
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->current = &bif_export[BIF_hibernate_3]->info.mfa;
c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */
return 1;
}
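
/*
 * A standalone sketch (invented names; no real locking or atomics) of
 * the hibernate flow above: fetch pending signals, garbage collect if
 * the queue is empty, and clear the ACTIVE flag only if the queue is
 * still empty after the collect.
 */
#include <stdio.h>

#define DEMO_PSFLG_ACTIVE 1u

static int demo_sig_fetch(const int *queue_len) { return *queue_len; }

static void demo_hibernate(unsigned *state, int *queue_len)
{
    if (!demo_sig_fetch(queue_len)) {
        /* ... erts_garbage_collect_hibernate() would run here ... */
        if (!demo_sig_fetch(queue_len))
            *state &= ~DEMO_PSFLG_ACTIVE;   /* safe to go to sleep */
    }
}

int main(void)
{
    unsigned state = DEMO_PSFLG_ACTIVE;
    int queue_len = 0;
    demo_hibernate(&state, &queue_len);
    printf("active after hibernate: %u\n", state & DEMO_PSFLG_ACTIVE);
    return 0;
}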
@@ -6475,21 +2460,15 @@ call_fun(Process* p, /* Current process. */
if (is_fun_header(hdr)) {
ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
- ErlFunEntry* fe;
- BeamInstr* code_ptr;
+ ErlFunEntry* fe = funp->fe;
+ BeamInstr* code_ptr = fe->address;
Eterm* var_ptr;
- int actual_arity;
- unsigned num_free;
-
- fe = funp->fe;
- num_free = funp->num_free;
- code_ptr = fe->address;
- actual_arity = (int) code_ptr[-1];
+ unsigned num_free = funp->num_free;
+ ErtsCodeMFA *mfa = erts_code_to_codemfa(code_ptr);
+ int actual_arity = mfa->arity;
if (actual_arity == arity+num_free) {
- DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
- (Eterm)code_ptr[-2],
- code_ptr[-1]);
+ DTRACE_LOCAL_CALL(p, mfa);
if (num_free == 0) {
return code_ptr;
} else {
@@ -6547,7 +2526,7 @@ call_fun(Process* p, /* Current process. */
module = fe->module;
- ERTS_SMP_READ_MEMORY_BARRIER;
+ ERTS_THR_READ_MEMORY_BARRIER;
if (fe->pend_purge_address) {
/*
* The system is currently trying to purge the
@@ -6555,7 +2534,7 @@ call_fun(Process* p, /* Current process. */
* and let it try again when the purge operation is
* done (may succeed or not).
*/
- ep = erts_suspend_process_on_pending_purge_lambda(p);
+ ep = erts_suspend_process_on_pending_purge_lambda(p, fe);
ASSERT(ep);
}
else {
@@ -6594,10 +2573,10 @@ call_fun(Process* p, /* Current process. */
int actual_arity;
ep = *((Export **) (export_val(fun) + 1));
- actual_arity = (int) ep->code[2];
+ actual_arity = ep->info.mfa.arity;
if (arity == actual_arity) {
- DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
+ DTRACE_GLOBAL_CALL(p, &ep->info.mfa);
return ep->addressv[erts_active_code_ix()];
} else {
/*
@@ -6651,7 +2630,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
if (is_not_nil(tmp)) { /* Must be well-formed list */
- p->freason = EXC_UNDEF;
+ p->freason = EXC_BADARG;
return NULL;
}
reg[arity] = fun;
@@ -6685,9 +2664,6 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
funp->fe = fe;
funp->num_free = num_free;
funp->creator = p->common.id;
-#ifdef HIPE
- funp->native_address = fe->native_address;
-#endif
funp->arity = (int)fe->address[-1] - num_free;
for (i = 0; i < num_free; i++) {
*hp++ = reg[i];
@@ -6784,24 +2760,20 @@ do { \
static Eterm
-new_map(Process* p, Eterm* reg, BeamInstr* I)
+erts_gc_new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr)
{
- Uint n = Arg(3);
Uint i;
Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
Eterm keys;
Eterm *mhp,*thp;
Eterm *E;
- BeamInstr *ptr;
flatmap_t *mp;
ErtsHeapFactory factory;
- ptr = &Arg(4);
-
if (n > 2*MAP_SMALL_MAP_LIMIT) {
Eterm res;
if (HeapWordsLeft(p) < n) {
- erts_garbage_collect(p, n, reg, Arg(2));
+ erts_garbage_collect(p, n, reg, live);
}
mhp = p->htop;
@@ -6822,7 +2794,7 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
}
if (HeapWordsLeft(p) < need) {
- erts_garbage_collect(p, need, reg, Arg(2));
+ erts_garbage_collect(p, need, reg, live);
}
thp = p->htop;
@@ -6845,9 +2817,44 @@ new_map(Process* p, Eterm* reg, BeamInstr* I)
}
static Eterm
-update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+erts_gc_new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal,
+ Uint live, BeamInstr* ptr)
+{
+ Eterm* keys = tuple_val(keys_literal);
+ Uint n = arityval(*keys);
+ Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */;
+ Uint i;
+ flatmap_t *mp;
+ Eterm *mhp;
+ Eterm *E;
+
+ ASSERT(n <= MAP_SMALL_MAP_LIMIT);
+
+ if (HeapWordsLeft(p) < need) {
+ erts_garbage_collect(p, need, reg, live);
+ }
+
+ mhp = p->htop;
+ E = p->stop;
+
+ mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ;
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = n;
+ mp->keys = keys_literal;
+
+ for (i = 0; i < n; i++) {
+ GET_TERM(*ptr++, *mhp++);
+ }
+
+ p->htop = mhp;
+
+ return make_flatmap(mp);
+}
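
/*
 * A standalone sketch (invented layout; not the real flatmap_t) of why
 * erts_gc_new_small_map_lit() only copies values: a small flat map is
 * a header, a size, a pointer to a shared ordered key tuple, and the
 * values in key order.
 */
#include <stdio.h>

#define DEMO_SMALL_MAP_LIMIT 32

typedef struct {
    unsigned long thing_word;                   /* header word */
    unsigned long size;                         /* number of pairs */
    const char **keys;                          /* shared key tuple */
    long values[DEMO_SMALL_MAP_LIMIT];          /* one slot per key */
} DemoFlatMap;

static void demo_new_small_map(DemoFlatMap *mp, const char **keys,
                               const long *vals, unsigned long n)
{
    unsigned long i;
    mp->thing_word = 0x2C;     /* placeholder for MAP_HEADER_FLATMAP */
    mp->size = n;
    mp->keys = keys;           /* literal keys are shared, not copied */
    for (i = 0; i < n; i++)
        mp->values[i] = vals[i];
}

int main(void)
{
    const char *keys[] = { "a", "b" };
    const long vals[] = { 1, 2 };
    DemoFlatMap m;
    demo_new_small_map(&m, keys, vals, 2);
    printf("#{%s => %ld, %s => %ld}\n",
           m.keys[0], m.values[0], m.keys[1], m.values[1]);
    return 0;
}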
+
+static Eterm
+erts_gc_update_map_assoc(Process* p, Eterm* reg, Uint live,
+ Uint n, BeamInstr* new_p)
{
- Uint n;
Uint num_old;
Uint num_updates;
Uint need;
@@ -6857,23 +2864,18 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
Eterm* E;
Eterm* old_keys;
Eterm* old_vals;
- BeamInstr* new_p;
Eterm new_key;
Eterm* kp;
+ Eterm map;
- new_p = &Arg(5);
- num_updates = Arg(4) / 2;
+ num_updates = n / 2;
+ map = reg[live];
if (is_not_flatmap(map)) {
Uint32 hx;
Eterm val;
- /* apparently the compiler does not emit is_map instructions,
- * bad compiler */
-
- if (is_not_hashmap(map))
- return THE_NON_VALUE;
-
+ ASSERT(is_hashmap(map));
res = map;
E = p->stop;
while(num_updates--) {
@@ -6897,7 +2899,7 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
*/
if (num_old == 0) {
- return new_map(p, reg, I+1);
+ return erts_gc_new_map(p, reg, live, n, new_p);
}
/*
@@ -6907,8 +2909,6 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ;
if (HeapWordsLeft(p) < need) {
- Uint live = Arg(3);
- reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
old_mp = (flatmap_t *)flatmap_val(map);
@@ -7055,9 +3055,8 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
*/
static Eterm
-update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
+erts_gc_update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p)
{
- Uint n;
Uint i;
Uint num_old;
Uint need;
@@ -7067,12 +3066,12 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
Eterm* E;
Eterm* old_keys;
Eterm* old_vals;
- BeamInstr* new_p;
Eterm new_key;
+ Eterm map;
- new_p = &Arg(5);
- n = Arg(4) / 2; /* Number of values to be updated */
+ n /= 2; /* Number of values to be updated */
ASSERT(n > 0);
+ map = reg[live];
if (is_not_flatmap(map)) {
Uint32 hx;
@@ -7126,8 +3125,6 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I)
need = num_old + MAP_HEADER_FLATMAP_SZ;
if (HeapWordsLeft(p) < need) {
- Uint live = Arg(3);
- reg[live] = map;
erts_garbage_collect(p, need, reg, live+1);
map = reg[live];
old_mp = (flatmap_t *)flatmap_val(map);
@@ -7209,24 +3206,27 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
Export e;
Export* ep;
- if (Mod == am_erlang && Name == am_apply && arity == 3) {
- /*
- * Special case. apply/3 is built-in (implemented in C),
- * but implemented in a different way than all other
- * BIFs.
- */
- return 1;
+ if (Mod == am_erlang) {
+ /*
+ * Special case for built-in functions that are implemented
+ * as instructions as opposed to SNIFs.
+ */
+ if (Name == am_apply && (arity == 2 || arity == 3)) {
+ return 1;
+ } else if (Name == am_yield && arity == 0) {
+ return 1;
+ }
}
- e.code[0] = Mod;
- e.code[1] = Name;
- e.code[2] = arity;
+ e.info.mfa.module = Mod;
+ e.info.mfa.function = Name;
+ e.info.mfa.arity = arity;
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->addressv[erts_active_code_ix()] == ep->code+3
- && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_apply_bif);
}
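
/*
 * A standalone sketch (invented ops and layout; not the real Export
 * struct) of the final test above: an export entry is a loaded BIF if
 * it dispatches into its own stub and the stub starts with apply_bif.
 */
#include <stdio.h>

enum { DEMO_OP_APPLY_BIF = 1, DEMO_OP_CALL_ERROR_HANDLER = 2 };

typedef struct {
    unsigned long beam[2];      /* in-entry stub code */
    unsigned long *address;     /* where calls currently dispatch */
} DemoExport;

static int demo_is_builtin(const DemoExport *ep)
{
    return ep->address == ep->beam && ep->beam[0] == DEMO_OP_APPLY_BIF;
}

int main(void)
{
    DemoExport bif = { { DEMO_OP_APPLY_BIF, 0 }, NULL };
    bif.address = bif.beam;     /* dispatches to its own stub */
    printf("builtin? %d\n", demo_is_builtin(&bif));   /* 1 */
    return 0;
}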
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 0afdedf6c2..e61199a8fd 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,11 +39,14 @@
#include "erl_binary.h"
#include "erl_zlib.h"
#include "erl_map.h"
+#include "erl_process_dict.h"
+#include "erl_unicode.h"
#ifdef HIPE
#include "hipe_bif0.h"
#include "hipe_mode_switch.h"
#include "hipe_arch.h"
+#include "hipe_load.h"
#endif
ErlDrvBinary* erts_gzinflate_buffer(char*, int);
@@ -53,12 +56,6 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int);
#define DEFINED 1
#define EXPORTED 2
-#ifdef NO_JUMP_TABLE
-# define BeamOpCode(Op) ((BeamInstr)(Op))
-#else
-# define BeamOpCode(Op) ((BeamInstr)beam_ops[Op])
-#endif
-
#if defined(WORDS_BIGENDIAN)
# define NATIVE_ENDIAN(F) \
if ((F).val & BSF_NATIVE) { \
@@ -79,16 +76,28 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int);
#define TE_FAIL (-1)
#define TE_SHORT_WINDOW (-2)
+/*
+ * Type for a reference to a label that must be patched.
+ */
+
typedef struct {
- Uint value; /* Value of label (NULL if not known yet). */
- Sint patches; /* Index (into code buffer) to first location
- * which must be patched with the value of this label.
- */
-#ifdef ERTS_SMP
+ Uint pos; /* Position of label reference to patch. */
+ Uint offset; /* Offset from patch location. */
+ int packed; /* 0 (not packed), 1 (lsw), 2 (msw) */
+} LabelPatch;
+
+/*
+ * Type for a label.
+ */
+
+typedef struct {
+ Uint value; /* Value of label (0 if not known yet). */
Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec
* instruction.
*/
-#endif
+ LabelPatch* patches; /* Array of label patches. */
+ Uint num_patches; /* Number of patches in array. */
+ Uint num_allocated; /* Number of allocated patches. */
} Label;
/*
@@ -155,13 +164,15 @@ typedef struct {
#define STR_CHUNK 2
#define IMP_CHUNK 3
#define EXP_CHUNK 4
-#define NUM_MANDATORY 5
+#define MIN_MANDATORY 1
+#define MAX_MANDATORY 5
#define LAMBDA_CHUNK 5
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
#define LINE_CHUNK 9
+#define UTF8_ATOM_CHUNK 10
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -171,9 +182,13 @@ typedef struct {
static Uint chunk_types[] = {
/*
- * Mandatory chunk types -- these MUST be present.
+ * Atom chunk types -- Atom or AtU8 MUST be present.
*/
MakeIffId('A', 't', 'o', 'm'), /* 0 */
+
+ /*
+ * Mandatory chunk types -- these MUST be present.
+ */
MakeIffId('C', 'o', 'd', 'e'), /* 1 */
MakeIffId('S', 't', 'r', 'T'), /* 2 */
MakeIffId('I', 'm', 'p', 'T'), /* 3 */
@@ -187,6 +202,7 @@ static Uint chunk_types[] = {
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
MakeIffId('L', 'i', 'n', 'e'), /* 9 */
+ MakeIffId('A', 't', 'U', '8'), /* 10 */
};
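
/*
 * A standalone sketch of what MakeIffId() above produces: an IFF chunk
 * id such as "AtU8" is four ASCII bytes packed big-endian into one
 * word, so chunk headers can be compared as integers while scanning.
 */
#include <stdio.h>

#define DEMO_IFF_ID(a, b, c, d) \
    (((unsigned)(a) << 24) | ((unsigned)(b) << 16) | \
     ((unsigned)(c) << 8)  |  (unsigned)(d))

int main(void)
{
    printf("AtU8 = 0x%08X\n", DEMO_IFF_ID('A', 't', 'U', '8'));
    /* prints 0x41745538 */
    return 0;
}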
/*
@@ -218,7 +234,7 @@ typedef struct {
typedef struct literal_patch LiteralPatch;
struct literal_patch {
- int pos; /* Position in code */
+ Uint pos; /* Position in code */
LiteralPatch* next;
};
@@ -298,6 +314,7 @@ typedef struct LoaderState {
int on_load; /* Index in the code for the on_load function
* (or 0 if there is no on_load function)
*/
+ int otp_20_or_higher; /* Compiled with OTP 20 or higher */
/*
* Atom table.
@@ -442,7 +459,7 @@ typedef struct LoaderState {
#ifdef DEBUG
# define GARBAGE 0xCC
-# define DEBUG_INIT_GENOP(Dst) memset(Dst, GARBAGE, sizeof(GenOp))
+# define DEBUG_INIT_GENOP(Dst) sys_memset(Dst, GARBAGE, sizeof(GenOp))
#else
# define DEBUG_INIT_GENOP(Dst)
#endif
@@ -479,15 +496,18 @@ typedef struct LoaderState {
static void free_loader_state(Binary* magic);
static ErlHeapFragment* new_literal_fragment(Uint size);
static void free_literal_fragment(ErlHeapFragment*);
-static void loader_state_dtor(Binary* magic);
+static int loader_state_dtor(Binary* magic);
+#ifdef HIPE
static Eterm stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code, Uint size);
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code);
+#endif
static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
- Uint num_types, Uint num_mandatory);
+ Uint num_types);
static int verify_chunks(LoaderState* stp);
-static int load_atom_table(LoaderState* stp);
+static int load_atom_table(LoaderState* stp, ErtsAtomEncoding enc);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int is_bif(Eterm mod, Eterm func, unsigned arity);
@@ -495,6 +515,7 @@ static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
static int read_line_table(LoaderState* stp);
static int read_code_header(LoaderState* stp);
+static void init_label(Label* lp);
static int load_code(LoaderState* stp);
static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
GenOpArg Tuple, GenOpArg Dst);
@@ -525,6 +546,7 @@ static int get_tag_and_value(LoaderState* stp, Uint len_code,
static int new_label(LoaderState* stp);
static void new_literal_patch(LoaderState* stp, int pos);
static void new_string_patch(LoaderState* stp, int pos);
+static int find_literal(LoaderState* stp, Eterm needle, Uint *idx);
static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size);
static int genopargcompare(GenOpArg* a, GenOpArg* b);
static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix,
@@ -532,13 +554,12 @@ static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix,
static Eterm exported_from_module(Process* p, ErtsCodeIndex code_ix,
Eterm mod);
static Eterm functions_in_module(Process* p, BeamCodeHeader*);
+static Eterm nifs_in_module(Process* p, Eterm module);
static Eterm attributes_for_module(Process* p, BeamCodeHeader*);
static Eterm compilation_info_for_module(Process* p, BeamCodeHeader*);
static Eterm md5_of_module(Process* p, BeamCodeHeader*);
static Eterm has_native(BeamCodeHeader*);
static Eterm native_addresses(Process* p, BeamCodeHeader*);
-int patch_funentries(Eterm Patchlist);
-int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
static int must_swap_floats;
@@ -626,7 +647,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
CHKALLOC();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
if (!init_iff_file(stp, code, unloaded_size) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto load_error;
}
@@ -671,9 +692,16 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto load_error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto load_error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto load_error;
+ }
}
/*
@@ -723,6 +751,13 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
}
/*
+ * Find out whether the code was compiled with OTP 20
+ * or higher.
+ */
+
+ stp->otp_20_or_higher = stp->chunks[UTF8_ATOM_CHUNK].size > 0;
+
+ /*
* Load the code chunk.
*/
@@ -773,13 +808,8 @@ erts_finish_loading(Binary* magic, Process* c_p,
struct erl_module_instance* inst_p;
Uint size;
- /*
- * No other process may run since we will update the export
- * table which is not protected by any locks.
- */
-
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
/*
* Make current code for the module old and insert the new code
* as current. This will fail if there already exists old code
@@ -797,30 +827,29 @@ erts_finish_loading(Binary* magic, Process* c_p,
} else {
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = stp->module;
- int i;
+ int i, num_exps;
/*
* There is an -on_load() function. We will keep the current
* code, but we must turn off any tracing.
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
- if (ep == NULL || ep->code[0] != module) {
+ if (ep == NULL || ep->info.mfa.module != module) {
continue;
}
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ if (ep->addressv[code_ix] == ep->beam) {
+ if (BeamIsOpCode(ep->beam[0], op_apply_bif)) {
continue;
- } else if (ep->code[3] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ } else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
ASSERT(mod_tab_p->curr.num_traced_exports > 0);
- erts_clear_export_break(mod_tab_p, ep->code+3);
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
- ep->code[4] = 0;
+ erts_clear_export_break(mod_tab_p, &ep->info);
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
+ ep->beam[1] = 0;
}
- ASSERT(ep->code[4] == 0);
+ ASSERT(ep->beam[1] == 0);
}
}
ASSERT(mod_tab_p->curr.num_breakpoints == 0);
@@ -842,9 +871,7 @@ erts_finish_loading(Binary* magic, Process* c_p,
erts_alloc(ERTS_ALC_T_PREPARED_CODE,
sizeof(struct erl_module_instance));
inst_p = mod_tab_p->on_load;
- inst_p->nif = 0;
- inst_p->num_breakpoints = 0;
- inst_p->num_traced_exports = 0;
+ erts_module_instance_init(inst_p);
}
inst_p->code_hdr = stp->hdr;
@@ -898,7 +925,7 @@ erts_alloc_loader_state(void)
magic = erts_create_magic_binary(sizeof(LoaderState),
loader_state_dtor);
- erts_refc_inc(&magic->refc, 1);
+ erts_refc_inc(&magic->intern.refc, 1);
stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
@@ -943,6 +970,13 @@ erts_module_for_prepared_code(Binary* magic)
LoaderState* stp;
if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+#ifdef HIPE
+ HipeLoaderState *hipe_stp;
+ if ((hipe_stp = hipe_get_loader_state(magic))
+ && hipe_stp->text_segment != 0) {
+ return hipe_stp->module;
+ }
+#endif
return NIL;
}
stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -974,9 +1008,7 @@ static void
free_loader_state(Binary* magic)
{
loader_state_dtor(magic);
- if (erts_refc_dectest(&magic->refc, 0) == 0) {
- erts_bin_free(magic);
- }
+ erts_bin_release(magic);
}
static ErlHeapFragment* new_literal_fragment(Uint size)
@@ -1004,7 +1036,7 @@ static void free_literal_fragment(ErlHeapFragment* bp)
/*
* This destructor function can safely be called multiple times.
*/
-static void
+static int
loader_state_dtor(Binary* magic)
{
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
@@ -1023,6 +1055,10 @@ loader_state_dtor(Binary* magic)
stp->codev = 0;
}
if (stp->labels != 0) {
+ Uint num;
+ for (num = 0; num < stp->num_labels; num++) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels[num].patches);
+ }
erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels);
stp->labels = 0;
}
@@ -1089,12 +1125,15 @@ loader_state_dtor(Binary* magic)
*/
ASSERT(stp->genop_blocks == 0);
+ return 1;
}
+#ifdef HIPE
static Eterm
stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
- BeamCodeHeader* code_hdr, Uint size)
+ BeamCodeHeader* code_hdr, Uint size,
+ HipeModule *hipe_code)
{
Module* modp;
Eterm retval;
@@ -1117,6 +1156,9 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modp->curr.code_hdr = code_hdr;
modp->curr.code_length = size;
modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "insert_new_code "
+ "first_hipe_ref = %p", hipe_code->first_hipe_ref);
+ modp->curr.hipe_code = hipe_code;
/*
* Update ranges (used for finding a function from a PC value).
@@ -1125,6 +1167,7 @@ stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_update_ranges((BeamInstr*)modp->curr.code_hdr, size);
return NIL;
}
+#endif
static int
init_iff_file(LoaderState* stp, byte* code, Uint size)
@@ -1198,7 +1241,7 @@ init_iff_file(LoaderState* stp, byte* code, Uint size)
* Scan the IFF file. The header should have been verified by init_iff_file().
*/
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types)
{
Uint count;
Uint id;
@@ -1277,7 +1320,16 @@ verify_chunks(LoaderState* stp)
MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < NUM_MANDATORY; i++) {
+
+ if (stp->chunks[UTF8_ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[UTF8_ATOM_CHUNK].start, stp->chunks[UTF8_ATOM_CHUNK].size);
+ } else if (stp->chunks[ATOM_CHUNK].start != NULL) {
+ MD5Update(&context, stp->chunks[ATOM_CHUNK].start, stp->chunks[ATOM_CHUNK].size);
+ } else {
+ LoadError0(stp, "mandatory chunk of type 'Atom' or 'AtU8' not found\n");
+ }
+
+ for (i = MIN_MANDATORY; i < MAX_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -1338,7 +1390,7 @@ verify_chunks(LoaderState* stp)
}
static int
-load_atom_table(LoaderState* stp)
+load_atom_table(LoaderState* stp, ErtsAtomEncoding enc)
{
unsigned int i;
@@ -1357,7 +1409,7 @@ load_atom_table(LoaderState* stp)
GetByte(stp, n);
GetString(stp, atom, n);
- stp->atom[i] = erts_atom_put(atom, n, ERTS_ATOM_ENC_LATIN1, 1);
+ stp->atom[i] = erts_atom_put(atom, n, enc, 1);
}
/*
@@ -1371,7 +1423,7 @@ load_atom_table(LoaderState* stp)
Atom* ap;
ap = atom_tab(atom_val(stp->atom[1]));
- memcpy(sbuf, ap->name, ap->len);
+ sys_memcpy(sbuf, ap->name, ap->len);
sbuf[ap->len] = '\0';
LoadError1(stp, "module name in object code is %s", sbuf);
}
@@ -1420,8 +1472,8 @@ load_import_table(LoaderState* stp)
* the BIF function.
*/
if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- stp->import[i].bf = (BifFunction) e->code[4];
+ if (BeamIsOpCode(e->beam[0], op_apply_bif)) {
+ stp->import[i].bf = (BifFunction) e->beam[1];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
stp->may_load_nif = 1;
}
@@ -1490,7 +1542,7 @@ read_export_table(LoaderState* stp)
* any other functions that walk through all local functions.
*/
- if (stp->labels[n].patches >= 0) {
+ if (stp->labels[n].num_patches > 0) {
LoadError3(stp, "there are local calls to the stub for "
"the BIF %T:%T/%d",
stp->module, func, arity);
@@ -1514,7 +1566,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity)
if (e == NULL) {
return 0;
}
- if (e->code[3] != (BeamInstr) em_apply_bif) {
+ if (! BeamIsOpCode(e->beam[0], op_apply_bif)) {
return 0;
}
if (mod == am_erlang && func == am_apply && arity == 3) {
@@ -1756,7 +1808,7 @@ read_line_table(LoaderState* stp)
GetInt(stp, 2, n);
GetString(stp, fname, n);
- stp->fname[i] = erts_atom_put(fname, n, ERTS_ATOM_ENC_LATIN1, 1);
+ stp->fname[i] = erts_atom_put(fname, n, ERTS_ATOM_ENC_UTF8, 1);
}
}
@@ -1836,11 +1888,7 @@ read_code_header(LoaderState* stp)
stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_labels * sizeof(Label));
for (i = 0; i < stp->num_labels; i++) {
- stp->labels[i].value = 0;
- stp->labels[i].patches = -1;
-#ifdef ERTS_SMP
- stp->labels[i].looprec_targeted = 0;
-#endif
+ init_label(&stp->labels[i]);
}
stp->catches = 0;
@@ -1869,12 +1917,43 @@ read_code_header(LoaderState* stp)
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
+static void init_label(Label* lp)
+{
+ lp->value = 0;
+ lp->looprec_targeted = 0;
+ lp->num_patches = 0;
+ lp->num_allocated = 4;
+ lp->patches = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ lp->num_allocated * sizeof(LabelPatch));
+}
+
+static void
+register_label_patch(LoaderState* stp, Uint label, Uint ci, Uint offset)
+{
+ Label* lp;
+
+ ASSERT(label < stp->num_labels);
+ lp = &stp->labels[label];
+ if (lp->num_allocated <= lp->num_patches) {
+ lp->num_allocated *= 2;
+ lp->patches = erts_realloc(ERTS_ALC_T_PREPARED_CODE,
+ (void *) lp->patches,
+ lp->num_allocated * sizeof(LabelPatch));
+ }
+ lp->patches[lp->num_patches].pos = ci;
+ lp->patches[lp->num_patches].offset = offset;
+ lp->patches[lp->num_patches].packed = 0;
+ lp->num_patches++;
+ stp->codev[ci] = label;
+}
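
/*
 * A standalone sketch (invented names, no OOM handling) of the label
 * machinery above: each label keeps a doubling array of patch records,
 * and once the label's address is known every recorded code position
 * is rewritten.
 */
#include <stdlib.h>

typedef struct { size_t pos; size_t offset; int packed; } DemoPatch;

typedef struct {
    size_t value;                    /* resolved address, 0 if unknown */
    DemoPatch *patches;
    size_t num_patches, num_allocated;
} DemoLabel;

static void demo_register_patch(DemoLabel *lp, size_t ci, size_t offset)
{
    if (lp->num_allocated <= lp->num_patches) {
        lp->num_allocated = lp->num_allocated ? 2 * lp->num_allocated : 4;
        lp->patches = realloc(lp->patches,
                              lp->num_allocated * sizeof(DemoPatch));
    }
    lp->patches[lp->num_patches].pos = ci;
    lp->patches[lp->num_patches].offset = offset;
    lp->patches[lp->num_patches].packed = 0;
    lp->num_patches++;
}

int main(void)
{
    size_t code[8] = {0};
    DemoLabel lbl = { 0, NULL, 0, 0 };
    size_t i;

    demo_register_patch(&lbl, 3, 0);  /* two forward references */
    demo_register_patch(&lbl, 6, 0);
    lbl.value = 42;                   /* label finally defined */
    for (i = 0; i < lbl.num_patches; i++)
        code[lbl.patches[i].pos] = lbl.value + lbl.patches[i].offset;
    free(lbl.patches);
    return code[3] == 42 && code[6] == 42 ? 0 : 1;
}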
+
static int
load_code(LoaderState* stp)
{
int i;
- int ci;
- int last_func_start = 0; /* Needed by nif loading and line instructions */
+ Uint ci;
+ Uint last_instr_start; /* Needed for relative jumps */
+ Uint last_func_start = 0; /* Needed by nif loading and line instructions */
char* sign;
int arg; /* Number of current argument. */
int num_specific; /* Number of specific ops for current. */
@@ -1887,13 +1966,16 @@ load_code(LoaderState* stp)
GenOp** last_op_next = NULL;
int arity;
int retval = 1;
+#if defined(BEAM_WIDE_SHIFT)
+ int num_trailing_f; /* Number of extra 'f' arguments in a list */
+#endif
/*
* The size of the loaded func_info instruction is needed
* by both the nif functionality and line instructions.
*/
enum {
- FUNC_INFO_SZ = 5
+ FUNC_INFO_SZ = sizeof(ErtsCodeInfo) / sizeof(Eterm)
};
code = stp->codev;
@@ -1992,30 +2074,10 @@ load_code(LoaderState* stp)
case 0:
/* Floating point number.
* Not generated by the compiler in R16B and later.
+ * (The literal pool is used instead.)
*/
- {
- Eterm* hp;
-#if !defined(ARCH_64)
- Uint high, low;
-# endif
- last_op->a[arg].val = new_literal(stp, &hp,
- FLOAT_SIZE_OBJECT);
- hp[0] = HEADER_FLONUM;
- last_op->a[arg].type = TAG_q;
-#if defined(ARCH_64)
- GetInt(stp, 8, hp[1]);
-# else
- GetInt(stp, 4, high);
- GetInt(stp, 4, low);
- if (must_swap_floats) {
- Uint t = high;
- high = low;
- low = t;
- }
- hp[1] = high;
- hp[2] = low;
-# endif
- }
+ LoadError0(stp, "please re-compile this module with an "
+ ERLANG_OTP_RELEASE " compiler");
break;
case 1: /* List. */
if (arg+1 != arity) {
@@ -2028,7 +2090,7 @@ load_code(LoaderState* stp)
erts_alloc(ERTS_ALC_T_LOADER_TMP,
(arity+last_op->a[arg].val)
*sizeof(GenOpArg));
- memcpy(last_op->a, last_op->def_args,
+ sys_memcpy(last_op->a, last_op->def_args,
arity*sizeof(GenOpArg));
arity += last_op->a[arg].val;
break;
@@ -2250,7 +2312,8 @@ load_code(LoaderState* stp)
stp->specific_op = specific;
CodeNeed(opc[stp->specific_op].sz+16); /* Extra margin for packing */
- code[ci++] = BeamOpCode(stp->specific_op);
+ last_instr_start = ci + opc[stp->specific_op].adjust;
+ code[ci++] = BeamOpCodeAddr(stp->specific_op);
}
/*
@@ -2323,8 +2386,18 @@ load_code(LoaderState* stp)
code[ci++] = NIL;
break;
case TAG_q:
- new_literal_patch(stp, ci);
- code[ci++] = tmp_op->a[arg].val;
+ {
+ BeamInstr val = tmp_op->a[arg].val;
+ Eterm term = stp->literals[val].term;
+ new_literal_patch(stp, ci);
+ code[ci++] = val;
+ switch (loader_tag(term)) {
+ case LOADER_X_REG:
+ case LOADER_Y_REG:
+ LoadError1(stp, "the term '%T' would be confused "
+ "with a register", term);
+ }
+ }
break;
default:
LoadError1(stp, "bad tag %d for general source",
@@ -2332,7 +2405,8 @@ load_code(LoaderState* stp)
break;
}
break;
- case 'd': /* Destination (x(0), x(N), y(N) */
+ case 'd': /* Destination (x(N), y(N)) */
+ case 'S': /* Source (x(N), y(N)) */
switch (tag) {
case TAG_x:
code[ci++] = tmp_op->a[arg].val * sizeof(Eterm);
@@ -2346,11 +2420,29 @@ load_code(LoaderState* stp)
break;
}
break;
- case 'I': /* Untagged integer (or pointer). */
- VerifyTag(stp, tag, TAG_u);
- code[ci++] = tmp_op->a[arg].val;
- break;
- case 't': /* Small untagged integer -- can be packed. */
+ case 't': /* Small untagged integer (16 bits) -- can be packed. */
+ case 'I': /* Untagged integer (32 bits) -- can be packed. */
+ case 'W': /* Untagged integer or pointer (machine word). */
+#ifdef DEBUG
+ switch (*sign) {
+ case 't':
+ if (tmp_op->a[arg].val >> 16 != 0) {
+ load_printf(__LINE__, stp, "value %lu of type 't' does not fit in 16 bits",
+ tmp_op->a[arg].val);
+ ASSERT(0);
+ }
+ break;
+#ifdef ARCH_64
+ case 'I':
+ if (tmp_op->a[arg].val >> 32 != 0) {
+ load_printf(__LINE__, stp, "value %lu of type 'I' does not fit in 32 bits",
+ tmp_op->a[arg].val);
+ ASSERT(0);
+ }
+ break;
+#endif
+ }
+#endif
VerifyTag(stp, tag, TAG_u);
code[ci++] = tmp_op->a[arg].val;
break;
@@ -2360,16 +2452,14 @@ load_code(LoaderState* stp)
break;
case 'f': /* Destination label */
VerifyTag(stp, tag_to_letter[tag], *sign);
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
ci++;
break;
case 'j': /* 'f' or 'p' */
if (tag == TAG_p) {
code[ci] = 0;
} else if (tag == TAG_f) {
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
} else {
LoadError3(stp, "bad tag %d; expected %d or %d",
tag, TAG_f, TAG_p);
@@ -2389,7 +2479,6 @@ load_code(LoaderState* stp)
LoadError1(stp, "label %d defined more than once", last_label);
}
stp->labels[last_label].value = ci;
- ASSERT(stp->labels[last_label].patches < ci);
break;
case 'e': /* Export entry */
VerifyTag(stp, tag, TAG_u);
@@ -2435,39 +2524,168 @@ load_code(LoaderState* stp)
* The packing engine.
*/
if (opc[stp->specific_op].pack[0]) {
- char* prog; /* Program for packing engine. */
- BeamInstr stack[8]; /* Stack. */
- BeamInstr* sp = stack; /* Points to next free position. */
- BeamInstr packed = 0; /* Accumulator for packed operations. */
+ char* prog; /* Program for packing engine. */
+ struct pack_stack {
+ BeamInstr instr;
+ Uint* patch_pos;
+ } stack[8]; /* Stack. */
+ struct pack_stack* sp = stack; /* Points to next free position. */
+ BeamInstr packed = 0; /* Accumulator for packed operations. */
+ LabelPatch* packed_label = 0;
for (prog = opc[stp->specific_op].pack; *prog; prog++) {
switch (*prog) {
- case 'g': /* Get instruction; push on stack. */
- *sp++ = code[--ci];
- break;
- case 'i': /* Initialize packing accumulator. */
- packed = code[--ci];
+ case 'g': /* Get operand and push on stack. */
+ ci--;
+ sp->instr = code[ci];
+ sp->patch_pos = 0;
+ sp++;
+ break;
+ case 'f': /* Get possible 'f' operand and push on stack. */
+ {
+ Uint w = code[--ci];
+ sp->instr = w;
+ sp->patch_pos = 0;
+
+ if (w != 0) {
+ LabelPatch* lbl_p;
+ int num_patches;
+ int patch;
+
+ ASSERT(w < stp->num_labels);
+ lbl_p = stp->labels[w].patches;
+ num_patches = stp->labels[w].num_patches;
+ for (patch = num_patches - 1; patch >= 0; patch--) {
+ if (lbl_p[patch].pos == ci) {
+ sp->patch_pos = &lbl_p[patch].pos;
+ break;
+ }
+ }
+ ASSERT(sp->patch_pos);
+ }
+ sp++;
+ }
+ break;
+ case 'q': /* Get possible 'q' operand and push on stack. */
+ {
+ LiteralPatch* lp;
+
+ ci--;
+ sp->instr = code[ci];
+ sp->patch_pos = 0;
+
+ for (lp = stp->literal_patches;
+ lp && lp->pos > ci-MAX_OPARGS;
+ lp = lp->next) {
+ if (lp->pos == ci) {
+ sp->patch_pos = &lp->pos;
+ break;
+ }
+ }
+ sp++;
+ }
+ break;
+#ifdef ARCH_64
+ case '1': /* Tightest shift (always 10 bits) */
+ ci--;
+ ASSERT((code[ci] & ~0x1FF8ull) == 0); /* Fits in 10 bits */
+ packed = (packed << BEAM_TIGHTEST_SHIFT);
+ packed |= code[ci] >> 3;
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
- case '0': /* Tight shift */
+#endif
+ case '2': /* Tight shift (10 or 16 bits) */
packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci];
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
- case '6': /* Shift 16 steps */
+ case '3': /* Loose shift (16 bits) */
packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci];
+ if (packed_label) {
+ packed_label->packed++;
+ }
break;
#ifdef ARCH_64
- case 'w': /* Shift 32 steps */
- packed = (packed << BEAM_WIDE_SHIFT) | code[--ci];
- break;
+ case '4': /* Wide shift (32 bits) */
+ {
+ Uint w = code[--ci];
+
+ if (packed_label) {
+ packed_label->packed++;
+ }
+
+ /*
+ * 'w' can handle labels ('f' and 'j') as well
+ * as 'I'. Test whether this is a label.
+ */
+
+ if (w < stp->num_labels) {
+ /*
+ * Probably a label. Look for a patch pointing to this
+ * position.
+ */
+ LabelPatch* lp = stp->labels[w].patches;
+ int num_patches = stp->labels[w].num_patches;
+ int patch;
+ for (patch = num_patches - 1; patch >= 0; patch--) {
+ if (lp[patch].pos == ci) {
+ lp[patch].packed = 1;
+ packed_label = &lp[patch];
+ break;
+ }
+ }
+ }
+ packed = (packed << BEAM_WIDE_SHIFT) |
+ (code[ci] & BEAM_WIDE_MASK);
+ }
+ break;
#endif
case 'p': /* Put instruction (from stack). */
- code[ci++] = *--sp;
+ --sp;
+ code[ci] = sp->instr;
+ if (sp->patch_pos) {
+ *sp->patch_pos = ci;
+ }
+ ci++;
break;
- case 'P': /* Put packed operands. */
- *sp++ = packed;
+ case 'P': /* Put packed operands (on the stack). */
+ sp->instr = packed;
+ sp->patch_pos = 0;
+ if (packed_label) {
+ sp->patch_pos = &packed_label->pos;
+ packed_label = 0;
+ }
+ sp++;
packed = 0;
break;
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+ case '#': /* -1 */
+ case '$': /* -2 */
+ case '%': /* -3 */
+ case '&': /* -4 */
+ case '\'': /* -5 */
+ case '(': /* -6 */
+ /* Pack accumulator contents into instruction word. */
+ {
+ Sint pos = ci - (*prog - '#' + 1);
+ /* Are the high 32 bits of the instruction word zero? */
+ ASSERT((code[pos] & ~((1ull << BEAM_WIDE_SHIFT)-1)) == 0);
+ code[pos] |= packed << BEAM_WIDE_SHIFT;
+ if (packed_label) {
+ ASSERT(packed_label->packed == 1);
+ packed_label->pos = pos;
+ packed_label->packed = 2;
+ packed_label = 0;
+ }
+ packed >>= BEAM_WIDE_SHIFT;
+ }
+ break;
+#endif
default:
- ASSERT(0);
+ erts_exit(ERTS_ERROR_EXIT, "beam_load: invalid packing op: %c\n", *prog);
}
}
ASSERT(sp == stack); /* Incorrect program? */
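
/*
 * A standalone sketch (invented widths) of what the packing program
 * does: small operands are squeezed into one machine word with
 * shift-and-or, and the interpreter later peels them off with
 * mask-and-shift in reverse order.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_TIGHT_SHIFT 16
#define DEMO_TIGHT_MASK  ((1ull << DEMO_TIGHT_SHIFT) - 1)

int main(void)
{
    uint64_t a = 3, b = 7, c = 11;    /* three small operands */
    uint64_t packed = 0;

    packed = (packed << DEMO_TIGHT_SHIFT) | a;   /* like the '2' op */
    packed = (packed << DEMO_TIGHT_SHIFT) | b;
    packed = (packed << DEMO_TIGHT_SHIFT) | c;

    /* Unpack: the last operand packed comes out first. */
    printf("%u ", (unsigned)(packed & DEMO_TIGHT_MASK));  /* 11 */
    packed >>= DEMO_TIGHT_SHIFT;
    printf("%u ", (unsigned)(packed & DEMO_TIGHT_MASK));  /* 7 */
    packed >>= DEMO_TIGHT_SHIFT;
    printf("%u\n", (unsigned)(packed & DEMO_TIGHT_MASK)); /* 3 */
    return 0;
}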
@@ -2477,7 +2695,17 @@ load_code(LoaderState* stp)
* Load any list arguments using the primitive tags.
*/
+#if defined(BEAM_WIDE_SHIFT)
+ num_trailing_f = 0;
+#endif
for ( ; arg < tmp_op->arity; arg++) {
+#if defined(BEAM_WIDE_SHIFT)
+ if (tmp_op->a[arg].type == TAG_f) {
+ num_trailing_f++;
+ } else {
+ num_trailing_f = 0;
+ }
+#endif
switch (tmp_op->a[arg].type) {
case TAG_i:
CodeNeed(1);
@@ -2491,8 +2719,7 @@ load_code(LoaderState* stp)
break;
case TAG_f:
CodeNeed(1);
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
+ register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start);
ci++;
break;
case TAG_x:
@@ -2518,6 +2745,61 @@ load_code(LoaderState* stp)
}
}
+ /*
+ * If all the extra arguments were 'f' operands,
+ * and the wordsize is 64 bits, pack two 'f' operands
+ * into each word.
+ */
+
+#if defined(BEAM_WIDE_SHIFT)
+ if (num_trailing_f >= 1) {
+ Uint src_index = ci - num_trailing_f;
+ Uint src_limit = ci;
+ Uint dst_limit = src_index + (num_trailing_f+1)/2;
+
+ ci = src_index;
+ while (ci < dst_limit) {
+ Uint w[2];
+ BeamInstr packed = 0;
+ int wi;
+
+ w[0] = code[src_index];
+ if (src_index+1 < src_limit) {
+ w[1] = code[src_index+1];
+ } else {
+ w[1] = 0;
+ }
+ for (wi = 0; wi < 2; wi++) {
+ Uint lbl = w[wi];
+ LabelPatch* lp = stp->labels[lbl].patches;
+ int num_patches = stp->labels[lbl].num_patches;
+
+#if defined(WORDS_BIGENDIAN)
+ packed <<= BEAM_WIDE_SHIFT;
+ packed |= lbl & BEAM_WIDE_MASK;
+#else
+ packed >>= BEAM_WIDE_SHIFT;
+ packed |= lbl << BEAM_WIDE_SHIFT;
+#endif
+ while (num_patches-- > 0) {
+ if (lp->pos == src_index + wi) {
+ lp->pos = ci;
+#if defined(WORDS_BIGENDIAN)
+ lp->packed = 2 - wi;
+#else
+ lp->packed = wi + 1;
+#endif
+ break;
+ }
+ lp++;
+ }
+ }
+ code[ci++] = packed;
+ src_index += 2;
+ }
+ }
+#endif
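
/*
 * A standalone sketch (little-endian layout only) of the trailing-'f'
 * packing above: two 32-bit label operands share one 64-bit code word,
 * and the patch records remember which half each label occupies.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_WIDE_SHIFT 32
#define DEMO_WIDE_MASK  ((1ull << DEMO_WIDE_SHIFT) - 1)

int main(void)
{
    uint32_t lbl0 = 100, lbl1 = 200;  /* two label references */
    uint64_t word = ((uint64_t)lbl1 << DEMO_WIDE_SHIFT) | lbl0;

    printf("low half=%u high half=%u\n",
           (unsigned)(word & DEMO_WIDE_MASK),
           (unsigned)(word >> DEMO_WIDE_SHIFT));
    return 0;
}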
+
/*
* Handle a few special cases.
*/
@@ -2532,15 +2814,10 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
-#ifdef ERTS_DIRTY_SCHEDULERS
- enum { MIN_FUNC_SZ = 4 };
-#else
- enum { MIN_FUNC_SZ = 3 };
-#endif
- if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
+ if (finfo_ix - last_func_start < BEAM_NIF_MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
- int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
- ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
+ int pad = BEAM_NIF_MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < BEAM_NIF_MIN_FUNC_SZ);
CodeNeed(pad);
sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
FUNC_INFO_SZ*sizeof(BeamInstr));
@@ -2565,18 +2842,20 @@ load_code(LoaderState* stp)
stp->function = code[ci-2];
stp->arity = code[ci-1];
+ /* When this assert is triggered, it is normally a sign that
+ the size of the ops.tab i_func_info instruction is not
+ the same as FUNC_INFO_SZ */
ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
- stp->hdr->functions[function_number] = (BeamInstr*) stp->labels[last_label].patches;
offset = function_number;
- stp->labels[last_label].patches = offset;
+ register_label_patch(stp, last_label, offset, 0);
function_number++;
if (stp->arity > MAX_ARG) {
LoadError1(stp, "too many arguments: %d", stp->arity);
}
#ifdef DEBUG
- ASSERT(stp->labels[0].patches < 0); /* Should not be referenced. */
+ ASSERT(stp->labels[0].num_patches == 0); /* Should not be referenced. */
for (i = 1; i < stp->num_labels; i++) {
- ASSERT(stp->labels[i].patches < ci);
+ ASSERT(stp->labels[i].num_patches <= stp->labels[i].num_allocated);
}
#endif
}
@@ -2587,8 +2866,8 @@ load_code(LoaderState* stp)
/* Remember offset for the on_load function. */
stp->on_load = ci;
break;
- case op_bs_put_string_II:
- case op_i_bs_match_string_xfII:
+ case op_bs_put_string_WW:
+ case op_i_bs_match_string_xfWW:
new_string_patch(stp, ci-1);
break;
@@ -2699,6 +2978,12 @@ load_code(LoaderState* stp)
#define never(St) 0
+static int
+compiled_with_otp_20_or_higher(LoaderState* stp)
+{
+ return stp->otp_20_or_higher;
+}
+
/*
* Predicate that tests whether a jump table can be used.
*/
@@ -2838,17 +3123,18 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
op->next = NULL;
if (Index.type == TAG_i && Index.val > 0 &&
+ Index.val <= ERTS_MAX_TUPLE_SIZE &&
(Tuple.type == TAG_x || Tuple.type == TAG_y)) {
op->op = genop_i_fast_element_4;
- op->a[0] = Fail;
- op->a[1] = Tuple;
+ op->a[0] = Tuple;
+ op->a[1] = Fail;
op->a[2].type = TAG_u;
op->a[2].val = Index.val;
op->a[3] = Dst;
} else {
op->op = genop_i_element_4;
- op->a[0] = Fail;
- op->a[1] = Tuple;
+ op->a[0] = Tuple;
+ op->a[1] = Fail;
op->a[2] = Index;
op->a[3] = Dst;
}
@@ -2928,13 +3214,14 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
op->a[0] = Ms;
op->a[1] = Fail;
op->a[2] = Dst;
+#ifdef ARCH_64
} else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) {
- op->op = genop_i_bs_get_integer_32_4;
- op->arity = 4;
+ op->op = genop_i_bs_get_integer_32_3;
+ op->arity = 3;
op->a[0] = Ms;
op->a[1] = Fail;
- op->a[2] = Live;
- op->a[3] = Dst;
+ op->a[2] = Dst;
+#endif
} else {
generic:
if (bits < SMALL_BITS) {
@@ -3069,16 +3356,6 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
}
/*
- * Predicate to test whether a heap binary should be generated.
- */
-
-static int
-should_gen_heap_bin(LoaderState* stp, GenOpArg Src)
-{
- return Src.val <= ERL_ONHEAP_BIN_LIMIT;
-}
-
-/*
* Predicate to test whether a binary construction is too big.
*/
@@ -3350,27 +3627,14 @@ negation_is_small(LoaderState* stp, GenOpArg Int)
IS_SSMALL(-((Sint)Int.val));
}
-
-static int
-smp(LoaderState* stp)
-{
-#ifdef ERTS_SMP
- return 1;
-#else
- return 0;
-#endif
-}
-
/*
* Mark this label.
*/
static int
smp_mark_target_label(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_f);
stp->labels[L.val].looprec_targeted = 1;
-#endif
return 1;
}
@@ -3381,12 +3645,8 @@ smp_mark_target_label(LoaderState* stp, GenOpArg L)
static int
smp_already_locked(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_u);
return stp->labels[L.val].looprec_targeted;
-#else
- return 0;
-#endif
}
/*
@@ -3400,11 +3660,11 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_i_wait_timeout_2;
+ op->op = genop_wait_timeout_unlocked_int_2;
op->next = NULL;
op->arity = 2;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->a[0].type = TAG_u;
+ op->a[1] = Fail;
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
#if defined(ARCH_64)
@@ -3413,7 +3673,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
1
#endif
) {
- op->a[1].val = timeout;
+ op->a[0].val = timeout;
#if !defined(ARCH_64)
} else if (Time.type == TAG_q) {
Eterm big;
@@ -3427,7 +3687,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
} else {
Uint u;
(void) term_to_Uint(big, &u);
- op->a[1].val = (BeamInstr) u;
+ op->a[0].val = (BeamInstr) u;
}
#endif
} else {
@@ -3447,12 +3707,12 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
Sint timeout;
NEW_GENOP(stp, op);
- op->op = genop_i_wait_timeout_locked_2;
+ op->op = genop_wait_timeout_locked_int_2;
op->next = NULL;
op->arity = 2;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
-
+ op->a[0].type = TAG_u;
+ op->a[1] = Fail;
+
if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
#if defined(ARCH_64)
(timeout >> 32) == 0
@@ -3460,7 +3720,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
1
#endif
) {
- op->a[1].val = timeout;
+ op->a[0].val = timeout;
#if !defined(ARCH_64)
} else if (Time.type == TAG_q) {
Eterm big;
@@ -3474,7 +3734,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
} else {
Uint u;
(void) term_to_Uint(big, &u);
- op->a[1].val = (BeamInstr) u;
+ op->a[0].val = (BeamInstr) u;
}
#endif
} else {
@@ -3520,7 +3780,7 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
if (size == 2) {
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_tuple_arity2_6;
+ op->op = genop_i_select_tuple_arity2_4;
GENOP_ARITY(op, arity - 1);
op->a[0] = S;
op->a[1] = Fail;
@@ -3810,14 +4070,13 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
int i, j, align = 0;
if (size == 2) {
-
/*
* Use a special-cased instruction if there are only two values.
*/
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_val2_6;
+ op->op = genop_i_select_val2_4;
GENOP_ARITY(op, arity - 1);
op->a[0] = S;
op->a[1] = Fail;
@@ -3827,47 +4086,19 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
op->a[5] = Rest[3];
return op;
-
- } else if (size > 10) {
-
- /* binary search instruction */
-
- NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_i_select_val_bins_3;
- GENOP_ARITY(op, arity);
- op->a[0] = S;
- op->a[1] = Fail;
- op->a[2].type = TAG_u;
- op->a[2].val = size;
- for (i = 3; i < arity; i++) {
- op->a[i] = Rest[i-3];
- }
-
- /*
- * Sort the values to make them useful for a binary search.
- */
-
- qsort(op->a+3, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genopargcompare);
-#ifdef DEBUG
- for (i = 3; i < arity-2; i += 2) {
- ASSERT(op->a[i].val < op->a[i+2].val);
- }
-#endif
- return op;
}
- /* linear search instruction */
-
- align = 1;
+ if (size <= 10) {
+ /* Use linear search. Reserve room for a sentinel. */
+ align = 1;
+ }
arity += 2*align;
size += align;
NEW_GENOP(stp, op);
op->next = NULL;
- op->op = genop_i_select_val_lins_3;
+ op->op = (align == 0) ? genop_i_select_val_bins_3 : genop_i_select_val_lins_3;
GENOP_ARITY(op, arity);
op->a[0] = S;
op->a[1] = Fail;
@@ -3881,7 +4112,7 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
}
/*
- * Sort the values to make them useful for a sentinel search
+ * Sort the values to make them useful for a binary or sentinel search.
*/
qsort(tmp, size - align, 2*sizeof(GenOpArg),
@@ -3896,11 +4127,12 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp);
- /* add sentinel */
-
- op->a[j].type = TAG_u;
- op->a[j].val = ~((BeamInstr)0);
- op->a[j+size] = Fail;
+ if (align) {
+ /* Add sentinel for linear search. */
+ op->a[j].type = TAG_u;
+ op->a[j].val = ~((BeamInstr)0);
+ op->a[j+size] = Fail;
+ }
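/*
 * Editor's note: a minimal sketch (not emulator code) of why the sentinel
 * added above lets the linear-search variant (i_select_val_lins) scan
 * without a bounds check: the sentinel ~((BeamInstr)0) compares greater
 * than any real key, so the loop below always terminates. The function
 * name and return convention are illustrative.
 */
#if 0 /* sketch only */
static Uint select_val_linear(const BeamInstr* keys, BeamInstr val)
{
    Uint ix = 0;
    /* keys[] is sorted ascending and ends with the sentinel. */
    while (keys[ix] < val)
        ix++;
    return ix; /* caller dispatches via the matching arm, or Fail */
}
#endif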
#ifdef DEBUG
for (i = 0; i < size - 1; i++) {
@@ -4031,60 +4263,52 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx)
op->next = NULL;
return op;
}
+
+static GenOp*
+translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
+{
+ const ErtsGcBif* p;
+ BifFunction bf;
+
+ bf = stp->import[Bif.val].bf;
+ for (p = erts_gc_bifs; p->bif != 0; p++) {
+ if (p->bif == bf) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = (BeamInstr) p->gc_bif;
+ return op;
+ }
+ }
+
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
+}
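/*
 * Editor's note: a sketch of the table that translate_gc_bif() walks.
 * Only the two fields used above are shown, and the entries are inferred
 * from the if-else chain removed from gen_guard_bif1() below; the
 * authoritative definition of ErtsGcBif/erts_gc_bifs lives outside this
 * file and may carry more fields.
 */
#if 0 /* sketch only */
typedef struct {
    BifFunction bif;    /* guard BIF as imported by the module */
    BifFunction gc_bif; /* garbage-collecting implementation */
} ErtsGcBif;

const ErtsGcBif erts_gc_bifs[] = {
    {length_1, erts_gc_length_1},
    {size_1,   erts_gc_size_1},
    /* ...remaining guard BIFs... */
    {0, 0}              /* terminator tested as p->bif != 0 */
};
#endif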
+
/*
- * Rewrite gc_bifs with one parameter (the common case). Utilized
- * in ops.tab to rewrite instructions calling bif's in guards
- * to use a garbage collecting implementation.
+ * Rewrite gc_bifs with one parameter (the common case).
*/
static GenOp*
gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg Src, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == length_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_length_1;
- } else if (bf == size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_size_1;
- } else if (bf == bit_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1;
- } else if (bf == byte_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1;
- } else if (bf == map_size_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1;
- } else if (bf == abs_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1;
- } else if (bf == float_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_float_1;
- } else if (bf == round_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_round_1;
- } else if (bf == trunc_1) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif1_5;
op->arity = 5;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4095,35 +4319,18 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_2) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_i_gc_bif2_6;
op->arity = 6;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = Dst;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
/*
@@ -4134,37 +4341,19 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst)
{
GenOp* op;
- BifFunction bf;
NEW_GENOP(stp, op);
op->next = NULL;
- bf = stp->import[Bif.val].bf;
- /* The translations here need to have a reverse counterpart in
- beam_emu.c:translate_gc_bif for error handling to work properly. */
- if (bf == binary_part_3) {
- op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
- } else {
- op->op = genop_unsupported_guard_bif_3;
- op->arity = 3;
- op->a[0].type = TAG_a;
- op->a[0].val = stp->import[Bif.val].module;
- op->a[1].type = TAG_a;
- op->a[1].val = stp->import[Bif.val].function;
- op->a[2].type = TAG_u;
- op->a[2].val = stp->import[Bif.val].arity;
- return op;
- }
op->op = genop_ii_gc_bif3_7;
op->arity = 7;
op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ /* op->a[1] is set by translate_gc_bif() */
op->a[2] = Live;
op->a[3] = S1;
op->a[4] = S2;
op->a[5] = S3;
op->a[6] = Dst;
- op->next = NULL;
- return op;
+ return translate_gc_bif(stp, op, Bif);
}
static GenOp*
@@ -4231,6 +4420,92 @@ literal_is_map(LoaderState* stp, GenOpArg Lit)
}
/*
+ * Predicate to test whether all of the given new small map keys are literals
+ */
+static int
+is_small_map_literal_keys(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ if (Size.val > MAP_SMALL_MAP_LIMIT) {
+ return 0;
+ }
+
+ /*
+ * Operations with a non-literal key always have exactly one key,
+ * so any larger operation is known to have only literal keys.
+ */
+ if (Size.val != 2) {
+ return 1;
+ }
+
+ switch (Rest[0].type) {
+ case TAG_a:
+ case TAG_i:
+ case TAG_n:
+ case TAG_q:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static GenOp*
+gen_new_small_map_lit(LoaderState* stp, GenOpArg Dst, GenOpArg Live,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ unsigned size = Size.val;
+ Uint lit;
+ unsigned i;
+ GenOp* op;
+ GenOpArg* dst;
+ Eterm* hp;
+ Eterm* tmp;
+ Eterm* thp;
+ Eterm keys;
+
+ NEW_GENOP(stp, op);
+ GENOP_ARITY(op, 3 + size/2);
+ op->next = NULL;
+ op->op = genop_i_new_small_map_lit_3;
+
+ tmp = thp = erts_alloc(ERTS_ALC_T_LOADER_TMP, (1 + size/2) * sizeof(*tmp));
+ keys = make_tuple(thp);
+ *thp++ = make_arityval(size/2);
+
+ dst = op->a+3;
+
+ for (i = 0; i < size; i += 2) {
+ switch (Rest[i].type) {
+ case TAG_a:
+ *thp++ = Rest[i].val;
+ ASSERT(is_atom(Rest[i].val));
+ break;
+ case TAG_i:
+ *thp++ = make_small(Rest[i].val);
+ break;
+ case TAG_n:
+ *thp++ = NIL;
+ break;
+ case TAG_q:
+ *thp++ = stp->literals[Rest[i].val].term;
+ break;
+ }
+ *dst++ = Rest[i + 1];
+ }
+
+ if (!find_literal(stp, keys, &lit)) {
+ lit = new_literal(stp, &hp, 1 + size/2);
+ sys_memcpy(hp, tmp, (1 + size/2) * sizeof(*tmp));
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, tmp);
+
+ op->a[0] = Dst;
+ op->a[1] = Live;
+ op->a[2].type = TAG_q;
+ op->a[2].val = lit;
+
+ return op;
+}
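/*
 * Editor's note: illustrative rewrite performed by the function above.
 * For a construction like #{a => X, b => Y} with all keys literal, the
 * key tuple {a,b} becomes one shared literal and only the values stay
 * as instruction operands, roughly:
 *
 *   i_new_small_map_lit Dst Live {a,b} X Y
 *
 * The instruction rendering is a sketch, not verbatim BEAM output.
 */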
+
+/*
* Predicate to test whether the given literal is an empty map.
*/
@@ -4247,6 +4522,19 @@ is_empty_map(LoaderState* stp, GenOpArg Lit)
}
/*
+ * Predicate to test whether the given literal is an export.
+ */
+static int
+literal_is_export(LoaderState* stp, GenOpArg Lit)
+{
+ Eterm term;
+
+ ASSERT(Lit.type == TAG_q);
+ term = stp->literals[Lit.val].term;
+ return is_export(term);
+}
+
+/*
* Pseudo predicate map_key_sort that will sort the Rest operand for
* map instructions as a side effect.
*/
@@ -4386,22 +4674,25 @@ gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int
hash_internal_genop_arg(LoaderState* stp, GenOpArg Key, Uint32* hx)
{
+ Eterm key_term;
switch (Key.type) {
case TAG_a:
- *hx = atom_tab(atom_val(Key.val))->slot.bucket.hvalue;
- return 1;
+ key_term = Key.val;
+ break;
case TAG_i:
- *hx = Key.val;
- return 1;
+ key_term = make_small(Key.val);
+ break;
case TAG_n:
- *hx = make_internal_hash(NIL);
- return 1;
+ key_term = NIL;
+ break;
case TAG_q:
- *hx = make_internal_hash(stp->literals[Key.val].term);
- return 1;
+ key_term = stp->literals[Key.val].term;
+ break;
default:
return 0;
}
+ *hx = erts_pd_make_hx(key_term);
+ return 1;
}
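/*
 * Editor's note: all supported key kinds are now normalized to a proper
 * Eterm first and hashed through the single erts_pd_make_hx() entry
 * point, replacing the per-tag hashing removed above; one code path,
 * one hash function.
 */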
@@ -4556,7 +4847,7 @@ freeze_code(LoaderState* stp)
* function table in the beginning of the file.
*/
- code_hdr->functions[stp->num_functions] = (codev + stp->ci - 1);
+ code_hdr->functions[stp->num_functions] = (ErtsCodeInfo*)(codev + stp->ci - 1);
CHKBLK(ERTS_ALC_T_CODE,code_hdr);
/*
@@ -4638,7 +4929,9 @@ freeze_code(LoaderState* stp)
line_items[i] = codev + stp->ci - 1;
line_tab->fname_ptr = (Eterm*) &line_items[i + 1];
- memcpy(line_tab->fname_ptr, stp->fname, stp->num_fnames*sizeof(Eterm));
+ if (stp->num_fnames)
+ sys_memcpy(line_tab->fname_ptr, stp->fname,
+ stp->num_fnames*sizeof(Eterm));
line_tab->loc_size = stp->loc_size;
if (stp->loc_size == 2) {
@@ -4738,21 +5031,57 @@ freeze_code(LoaderState* stp)
*/
for (i = 0; i < stp->num_labels; i++) {
- Sint this_patch;
- Sint next_patch;
+ Uint patch;
Uint value = stp->labels[i].value;
-
- if (value == 0 && stp->labels[i].patches >= 0) {
+
+ if (value == 0 && stp->labels[i].num_patches != 0) {
LoadError1(stp, "label %d not resolved", i);
}
ASSERT(value < stp->ci);
- this_patch = stp->labels[i].patches;
- while (this_patch >= 0) {
- ASSERT(this_patch < stp->ci);
- next_patch = codev[this_patch];
- ASSERT(next_patch < stp->ci);
- codev[this_patch] = (BeamInstr) (codev + value);
- this_patch = next_patch;
+ for (patch = 0; patch < stp->labels[i].num_patches; patch++) {
+ LabelPatch* lp = &stp->labels[i].patches[patch];
+ Uint pos = lp->pos;
+ ASSERT(pos < stp->ci);
+ if (pos < stp->num_functions) {
+ /*
+ * This is the array of pointers to the beginning of
+ * each function. The pointers must remain absolute.
+ */
+ codev[pos] = (BeamInstr) (codev + value);
+ } else {
+#if defined(DEBUG) && defined(BEAM_WIDE_MASK)
+ Uint w;
+#endif
+ Sint32 rel = lp->offset + value;
+ switch (lp->packed) {
+ case 0: /* Not packed */
+ ASSERT(codev[pos] == i);
+ codev[pos] = rel;
+ break;
+#ifdef BEAM_WIDE_MASK
+ case 1: /* Least significant word. */
+#ifdef DEBUG
+ w = codev[pos] & BEAM_WIDE_MASK;
+ /* Correct label in least significant word? */
+ ASSERT(w == i);
+#endif
+ codev[pos] = (codev[pos] & ~BEAM_WIDE_MASK) |
+ (rel & BEAM_WIDE_MASK);
+ break;
+ case 2: /* Most significant word */
+#ifdef DEBUG
+ w = (codev[pos] >> BEAM_WIDE_SHIFT) & BEAM_WIDE_MASK;
+ /* Correct label in most significant word? */
+ ASSERT(w == i);
+#endif
+ codev[pos] = ((Uint)rel << BEAM_WIDE_SHIFT) |
+ (codev[pos] & BEAM_WIDE_MASK);
+ break;
+#endif
+ default:
+ ASSERT(0);
+ }
+ }
}
}
CHKBLK(ERTS_ALC_T_CODE,code_hdr);
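/*
 * Editor's note: sketch of the packed-operand layout patched above on
 * BEAM_WIDE_MASK builds, where two operands share one word:
 *
 *   word = ((Uint)msw << BEAM_WIDE_SHIFT) | (lsw & BEAM_WIDE_MASK)
 *
 * Case 1 rewrites the least significant half and case 2 the most
 * significant half, each leaving the other half untouched.
 */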
@@ -4795,8 +5124,11 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
catches = BEAM_CATCHES_NIL;
while (index != 0) {
BeamInstr next = codev[index];
- codev[index] = BeamOpCode(op_catch_yf);
- catches = beam_catches_cons((BeamInstr *)codev[index+2], catches);
+ BeamInstr* abs_addr;
+ codev[index] = BeamOpCodeAddr(op_catch_yf);
+ /* We must make the address of the label absolute again. */
+ abs_addr = (BeamInstr *)codev + index + codev[index+2];
+ catches = beam_catches_cons(abs_addr, catches);
codev[index+2] = make_catch(catches);
index = next;
}
@@ -4816,16 +5148,16 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
ep = erts_export_put(stp->module, stp->export[i].function,
stp->export[i].arity);
- if (!on_load) {
- ep->addressv[erts_staging_code_ix()] = address;
- } else {
+ if (on_load) {
/*
* on_load: Don't make any of the exported functions
* callable yet. Keep any function in the current
* code callable.
*/
- ep->code[4] = (BeamInstr) address;
+ ep->beam[1] = (BeamInstr) address;
}
+ else
+ ep->addressv[erts_staging_code_ix()] = address;
}
/*
@@ -4871,7 +5203,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
}
fe->address = code_ptr;
#ifdef HIPE
- hipe_set_closure_stub(fe, stp->lambdas[i].num_free);
+ hipe_set_closure_stub(fe);
#endif
}
}
@@ -4882,7 +5214,7 @@ transform_engine(LoaderState* st)
{
Uint op;
int ap; /* Current argument. */
- Uint* restart; /* Where to restart if current match fails. */
+ const Uint* restart; /* Where to restart if current match fails. */
GenOpArg var[TE_MAX_VARS]; /* Buffer for variables. */
GenOpArg* rest_args = NULL;
int num_rest_args = 0;
@@ -4891,7 +5223,7 @@ transform_engine(LoaderState* st)
GenOp* instr;
GenOp* first = st->genop;
GenOp* keep = NULL;
- Uint* pc;
+ const Uint* pc;
static Uint restart_fail[1] = {TOP_fail};
ASSERT(gen_opc[first->op].transform != -1);
@@ -5002,7 +5334,7 @@ transform_engine(LoaderState* st)
if (i >= st->num_imports || st->import[i].bf == NULL)
goto restart;
if (bif_number != -1 &&
- bif_export[bif_number]->code[4] != (BeamInstr) st->import[i].bf) {
+ bif_export[bif_number]->beam[1] != (BeamInstr) st->import[i].bf) {
goto restart;
}
}
@@ -5182,8 +5514,8 @@ transform_engine(LoaderState* st)
case TOP_store_rest_args:
{
GENOP_ARITY(instr, instr->arity+num_rest_args);
- memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg));
- memcpy(instr->a+ap, rest_args, num_rest_args*sizeof(GenOpArg));
+ sys_memcpy(instr->a, instr->def_args, ap*sizeof(GenOpArg));
+ sys_memcpy(instr->a+ap, rest_args, num_rest_args*sizeof(GenOpArg));
ap += num_rest_args;
}
break;
@@ -5263,12 +5595,15 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
{
Uint count;
Sint val;
- byte default_buf[128];
- byte* bigbuf = default_buf;
+ byte default_byte_buf[128];
+ byte* byte_buf = default_byte_buf;
+ Eterm default_big_buf[128/sizeof(Eterm)];
+ Eterm* big_buf = default_big_buf;
+ Eterm tmp_big;
byte* s;
int i;
int neg = 0;
- Uint arity;
+ Uint words_needed;
Eterm* hp;
/*
@@ -5345,8 +5680,11 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
*result = val;
return TAG_i;
} else {
- *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE);
- (void) small_to_big(val, hp);
+ tmp_big = small_to_big(val, big_buf);
+ if (!find_literal(stp, tmp_big, result)) {
+ *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE);
+ sys_memcpy(hp, big_buf, BIG_UINT_HEAP_SIZE*sizeof(Eterm));
+ }
return TAG_q;
}
}
@@ -5356,8 +5694,8 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
* (including margin).
*/
- if (count+8 > sizeof(default_buf)) {
- bigbuf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8);
+ if (count+8 > sizeof(default_byte_buf)) {
+ byte_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8);
}
/*
@@ -5366,20 +5704,20 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
GetString(stp, s, count);
for (i = 0; i < count; i++) {
- bigbuf[count-i-1] = *s++;
+ byte_buf[count-i-1] = *s++;
}
/*
* Check if the number is negative, and negate it if so.
*/
- if ((bigbuf[count-1] & 0x80) != 0) {
+ if ((byte_buf[count-1] & 0x80) != 0) {
unsigned carry = 1;
neg = 1;
for (i = 0; i < count; i++) {
- bigbuf[i] = ~bigbuf[i] + carry;
- carry = (bigbuf[i] == 0 && carry == 1);
+ byte_buf[i] = ~byte_buf[i] + carry;
+ carry = (byte_buf[i] == 0 && carry == 1);
}
ASSERT(carry == 0);
}
@@ -5388,33 +5726,52 @@ get_tag_and_value(LoaderState* stp, Uint len_code,
* Align to word boundary.
*/
- if (bigbuf[count-1] == 0) {
+ if (byte_buf[count-1] == 0) {
count--;
}
- if (bigbuf[count-1] == 0) {
+ if (byte_buf[count-1] == 0) {
LoadError0(stp, "bignum not normalized");
}
while (count % sizeof(Eterm) != 0) {
- bigbuf[count++] = 0;
+ byte_buf[count++] = 0;
}
/*
- * Allocate heap space for the bignum and copy it.
+ * Convert to a bignum.
*/
- arity = count/sizeof(Eterm);
- *result = new_literal(stp, &hp, arity+1);
- if (is_nil(bytes_to_big(bigbuf, count, neg, hp)))
- goto load_error;
+ words_needed = count/sizeof(Eterm) + 1;
+ if (words_needed*sizeof(Eterm) > sizeof(default_big_buf)) {
+ big_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, words_needed*sizeof(Eterm));
+ }
+ tmp_big = bytes_to_big(byte_buf, count, neg, big_buf);
+ if (is_nil(tmp_big)) {
+ goto load_error;
+ }
- if (bigbuf != default_buf) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ /*
+ * Create a literal if there is no previous literal with the same value.
+ */
+
+ if (!find_literal(stp, tmp_big, result)) {
+ *result = new_literal(stp, &hp, words_needed);
+ sys_memcpy(hp, big_buf, words_needed*sizeof(Eterm));
+ }
+
+ if (byte_buf != default_byte_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf);
+ }
+ if (big_buf != default_big_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf);
}
return TAG_q;
load_error:
- if (bigbuf != default_buf) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ if (byte_buf != default_byte_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf);
+ }
+ if (big_buf != default_big_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf);
}
return -1;
}
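/*
 * Editor's note: worked example of the negation loop above. byte_buf
 * holds the least significant byte first; for the two-byte big-endian
 * stream 0xFF 0xFE (-2 in two's complement) it contains {0xFE, 0xFF},
 * the sign bit of byte_buf[1] triggers negation, and the loop yields
 * {0x02, 0x00} with neg = 1, i.e. magnitude 2, sign minus. Real inputs
 * on this path are larger; -2 itself would be encoded as a small.
 */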
@@ -5459,8 +5816,7 @@ new_label(LoaderState* stp)
stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->labels,
stp->num_labels * sizeof(Label));
- stp->labels[num].value = 0;
- stp->labels[num].patches = -1;
+ init_label(&stp->labels[num]);
return num;
}
@@ -5515,6 +5871,24 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
return stp->num_literals++;
}
+static int
+find_literal(LoaderState* stp, Eterm needle, Uint *idx)
+{
+ int i;
+
+ /*
+ * The search is done backwards since the most recent literals
+ * allocated by the loader itself are placed at the end.
+ */
+ for (i = stp->num_literals - 1; i >= 0; i--) {
+ if (EQ(needle, stp->literals[i].term)) {
+ *idx = (Uint) i;
+ return 1;
+ }
+ }
+ return 0;
+}
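/*
 * Editor's note: the deduplication pattern this enables, as used by
 * get_tag_and_value() above: probe for an equal literal first and
 * allocate only on a miss. In this sketch, heap_size and src stand for
 * the term's word size and source words.
 */
#if 0 /* sketch only */
Uint lit;
if (!find_literal(stp, term, &lit)) {
    Eterm* hp;
    lit = new_literal(stp, &hp, heap_size);
    sys_memcpy(hp, src, heap_size*sizeof(Eterm));
}
#endif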
+
Eterm
erts_module_info_0(Process* p, Eterm module)
{
@@ -5594,6 +5968,8 @@ get_module_info(Process* p, ErtsCodeIndex code_ix, BeamCodeHeader* code_hdr,
return exported_from_module(p, code_ix, module);
} else if (what == am_functions) {
return functions_in_module(p, code_hdr);
+ } else if (what == am_nifs) {
+ return nifs_in_module(p, module);
} else if (what == am_attributes) {
return attributes_for_module(p, code_hdr);
} else if (what == am_compile) {
@@ -5627,18 +6003,16 @@ functions_in_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo* ci = code_hdr->functions[i];
Eterm tuple;
/*
* If the function name is [], this entry is a stub for
* a BIF that should be ignored.
*/
- ASSERT(is_atom(name) || is_nil(name));
- if (is_atom(name)) {
- tuple = TUPLE2(hp, name, make_small(arity));
+ ASSERT(is_atom(ci->mfa.function) || is_nil(ci->mfa.function));
+ if (is_atom(ci->mfa.function)) {
+ tuple = TUPLE2(hp, ci->mfa.function, make_small(ci->mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5649,6 +6023,46 @@ functions_in_module(Process* p, /* Process whose heap to use. */
}
/*
+ * Builds a list of all NIFs in the given module:
+ * [{Name, Arity},...]
+ */
+Eterm
+nifs_in_module(Process* p, Eterm module)
+{
+ Eterm nif_list, *hp;
+ Module *mod;
+
+ mod = erts_get_module(module, erts_active_code_ix());
+ nif_list = NIL;
+
+ if (mod->curr.nif != NULL) {
+ int func_count, func_ix;
+ ErlNifFunc *funcs;
+
+ func_count = erts_nif_get_funcs(mod->curr.nif, &funcs);
+ hp = HAlloc(p, func_count * 5);
+
+ for (func_ix = func_count - 1; func_ix >= 0; func_ix--) {
+ Eterm name, arity, pair;
+ ErlNifFunc *func;
+
+ func = &funcs[func_ix];
+
+ name = am_atom_put(func->name, sys_strlen(func->name));
+ arity = make_small(func->arity);
+
+ pair = TUPLE2(hp, name, arity);
+ hp += 3;
+
+ nif_list = CONS(hp, pair, nif_list);
+ hp += 2;
+ }
+ }
+
+ return nif_list;
+}
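/*
 * Editor's note: result shape of the builder above. The loop walks the
 * NIF table backwards while consing, so a module whose table lists
 * foo/1 and then bar/2 comes out as [{foo,1},{bar,2}], i.e. in table
 * order. The names are illustrative.
 */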
+
+/*
 * Returns 'true' if mod has any native-compiled functions, otherwise 'false'.
*/
@@ -5673,15 +6087,36 @@ erts_release_literal_area(ErtsLiteralArea* literal_area)
return;
oh = literal_area->off_heap;
-
+
while (oh) {
- Binary* bptr;
- ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
- bptr = ((ProcBin*)oh)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
- oh = oh->next;
+ switch (thing_subtag(oh->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ Binary* bptr = ((ProcBin*)oh)->val;
+ erts_bin_release(bptr);
+ break;
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunEntry* fe = ((ErlFunThing*)oh)->fe;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ break;
+ }
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(oh));
+ bptr = ((ErtsMRefThing *) oh)->mb;
+ erts_bin_release((Binary *) bptr);
+ break;
+ }
+ default:
+ ASSERT(is_external_header(oh->thing_word));
+ erts_deref_node_entry(((ExternalThing*)oh)->node);
+ }
+ oh = oh->next;
}
erts_free(ERTS_ALC_T_LITERAL, literal_area);
}
@@ -5695,17 +6130,28 @@ erts_is_module_native(BeamCodeHeader* code_hdr)
if (code_hdr != NULL) {
num_functions = code_hdr->num_functions;
for (i=0; i<num_functions; i++) {
- BeamInstr* func_info = (BeamInstr *) code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- if (is_atom(name)) {
- return func_info[1] != 0;
+ ErtsCodeInfo* ci = code_hdr->functions[i];
+ if (is_atom(ci->mfa.function)) {
+ return erts_is_function_native(ci);
}
- else ASSERT(is_nil(name)); /* ignore BIF stubs */
+ else ASSERT(is_nil(ci->mfa.function)); /* ignore BIF stubs */
}
}
return 0;
}
+int
+erts_is_function_native(ErtsCodeInfo *ci)
+{
+#ifdef HIPE
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ return BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call) ||
+ BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call_closure);
+#else
+ return 0;
+#endif
+}
+
/*
* Builds a list of all functions including native addresses.
* [{Name,Arity,NativeAddress},...]
@@ -5714,33 +6160,35 @@ erts_is_module_native(BeamCodeHeader* code_hdr)
static Eterm
native_addresses(Process* p, BeamCodeHeader* code_hdr)
{
+ Eterm result = NIL;
+#ifdef HIPE
int i;
Eterm* hp;
Uint num_functions;
Uint need;
Eterm* hp_end;
- Eterm result = NIL;
num_functions = code_hdr->num_functions;
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
hp = HAlloc(p, need);
hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
- BeamInstr* func_info = code_hdr->functions[i];
- Eterm name = (Eterm) func_info[3];
- int arity = (int) func_info[4];
+ ErtsCodeInfo *ci = code_hdr->functions[i];
Eterm tuple;
- ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
- if (func_info[1] != 0) {
- Eterm addr;
- ASSERT(is_atom(name));
- addr = erts_bld_uint(&hp, NULL, func_info[1]);
- tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
+ ASSERT(is_atom(ci->mfa.function)
+ || is_nil(ci->mfa.function)); /* [] if BIF stub */
+ if (ci->u.ncallee != NULL) {
+ Eterm addr;
+ ASSERT(is_atom(ci->mfa.function));
+ addr = erts_bld_uint(&hp, NULL, (Uint)ci->u.ncallee);
+ tuple = erts_bld_tuple(&hp, NULL, 3, ci->mfa.function,
+ make_small(ci->mfa.arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
}
HRelease(p, hp_end, hp);
+#endif
return result;
}
@@ -5754,19 +6202,20 @@ exported_from_module(Process* p, /* Process whose heap to use. */
ErtsCodeIndex code_ix,
Eterm mod) /* Tagged atom for module. */
{
- int i;
+ int i, num_exps;
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
- if (ep->code[0] == mod) {
+ if (ep->info.mfa.module == mod) {
Eterm tuple;
- if (ep->addressv[code_ix] == ep->code+3 &&
- ep->code[3] == (BeamInstr) em_call_error_handler) {
+ if (ep->addressv[code_ix] == ep->beam &&
+ BeamIsOpCode(ep->beam[0], op_call_error_handler)) {
/* There is a call to the function, but it does not exist. */
continue;
}
@@ -5776,7 +6225,8 @@ exported_from_module(Process* p, /* Process whose heap to use. */
hp = HAlloc(p, need);
hend = hp + need;
}
- tuple = TUPLE2(hp, ep->code[1], make_small(ep->code[2]));
+ tuple = TUPLE2(hp, ep->info.mfa.function,
+ make_small(ep->info.mfa.arity));
hp += 3;
result = CONS(hp, tuple, result);
hp += 2;
@@ -5850,7 +6300,6 @@ md5_of_module(Process* p, /* Process whose heap to use. */
Eterm*
erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
{
- BeamInstr* current = fi->current;
Eterm loc = NIL;
if (fi->loc != LINE_INVALID_LOCATION) {
@@ -5860,12 +6309,11 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
Eterm file_term = NIL;
if (file == 0) {
- Atom* ap = atom_tab(atom_val(fi->current[0]));
+ Atom* ap = atom_tab(atom_val(fi->mfa->module));
file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
} else {
- Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
- file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, NIL);
+ file_term = erts_atom_to_string(&hp, (fi->fname_ptr)[file-1]);
}
tuple = TUPLE2(hp, am_line, make_small(line));
@@ -5879,10 +6327,12 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
}
if (is_list(args) || is_nil(args)) {
- *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ args, loc);
} else {
- Eterm arity = make_small(current[2]);
- *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ Eterm arity = make_small(fi->mfa->arity);
+ *mfa_p = TUPLE4(hp, fi->mfa->module, fi->mfa->function,
+ arity, loc);
}
return hp + 5;
}
@@ -5893,9 +6343,9 @@ erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
* the function.
*/
void
-erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa)
{
- fi->current = current;
+ fi->mfa = mfa;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
}
@@ -5904,13 +6354,13 @@ erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
/*
* Returns a pointer to {module, function, arity}, or NULL if not found.
*/
-BeamInstr*
+ErtsCodeMFA*
find_function_from_pc(BeamInstr* pc)
{
FunctionInfo fi;
erts_lookup_function_info(&fi, pc, 0);
- return fi.current;
+ return fi.mfa;
}
/*
@@ -5965,7 +6415,7 @@ code_get_chunk_2(BIF_ALIST_2)
goto error;
}
if (!init_iff_file(stp, start, binary_size(Bin)) ||
- !scan_iff_file(stp, &chunk, 1, 1) ||
+ !scan_iff_file(stp, &chunk, 1) ||
stp->chunks[0].start == NULL) {
res = am_undefined;
goto done;
@@ -6014,7 +6464,7 @@ code_module_md5_1(BIF_ALIST_1)
}
stp->module = THE_NON_VALUE; /* Suppress diagnostics */
if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
- !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
res = am_undefined;
goto done;
@@ -6027,24 +6477,21 @@ code_module_md5_1(BIF_ALIST_1)
return res;
}
-#define WORDS_PER_FUNCTION 6
+#ifdef HIPE
+#define WORDS_PER_FUNCTION (sizeof(ErtsCodeInfo) / sizeof(UWord) + 1)
static BeamInstr*
-make_stub(BeamInstr* fp, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
+make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, BeamInstr OpCode)
{
- fp[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- fp[1] = native;
- fp[2] = mod;
- fp[3] = func;
- fp[4] = arity;
-#ifdef HIPE
- if (native) {
- fp[5] = BeamOpCode(op_move_return_n);
- hipe_mfa_save_orig_beam_op(mod, func, arity, fp+5);
- }
-#endif
- fp[5] = OpCode;
- return fp + WORDS_PER_FUNCTION;
+ DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info));
+ ASSERT(WORDS_PER_FUNCTION == 6);
+ info->op = BeamOpCodeAddr(op_i_func_info_IaaI);
+ info->u.ncallee = (void (*)(void)) native;
+ info->mfa.module = mod;
+ info->mfa.function = func;
+ info->mfa.arity = arity;
+ erts_codeinfo_to_code(info)[0] = OpCode;
+ return erts_codeinfo_to_code(info)+1;
}
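/*
 * Editor's note: word layout of one stub emitted by make_stub(),
 * matching the ASSERT(WORDS_PER_FUNCTION == 6) above:
 *
 *   [0]   info->op         (i_func_info_IaaI)
 *   [1]   info->u.ncallee  (native address, possibly 0)
 *   [2-4] info->mfa        (module, function, arity)
 *   [5]   OpCode           (the one-word body)
 *
 * make_stub() returns the address just past word 5.
 */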
static byte*
@@ -6058,7 +6505,7 @@ stub_copy_info(LoaderState* stp,
Sint decoded_size;
Uint size = stp->chunks[chunk].size;
if (size != 0) {
- memcpy(info, stp->chunks[chunk].start, size);
+ sys_memcpy(info, stp->chunks[chunk].start, size);
*ptr_word = info;
decoded_size = erts_decode_ext_size(info, size);
if (decoded_size < 0) {
@@ -6103,22 +6550,17 @@ stub_read_export_table(LoaderState* stp)
}
static void
-stub_final_touch(LoaderState* stp, BeamInstr* fp)
+stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci)
{
unsigned int i;
unsigned int n = stp->num_exps;
- Eterm mod = fp[2];
- Eterm function = fp[3];
- int arity = fp[4];
-#ifdef HIPE
Lambda* lp;
-#endif
- if (is_bif(mod, function, arity)) {
- fp[1] = 0;
- fp[2] = 0;
- fp[3] = 0;
- fp[4] = 0;
+ if (is_bif(ci->mfa.module, ci->mfa.function, ci->mfa.arity)) {
+ ci->u.ncallee = NULL;
+ ci->mfa.module = 0;
+ ci->mfa.function = 0;
+ ci->mfa.arity = 0;
return;
}
@@ -6127,9 +6569,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
*/
for (i = 0; i < n; i++) {
- if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(mod, function, arity);
- ep->addressv[erts_staging_code_ix()] = fp+5;
+ if (stp->export[i].function == ci->mfa.function &&
+ stp->export[i].arity == ci->mfa.arity) {
+ Export* ep = erts_export_put(ci->mfa.module,
+ ci->mfa.function,
+ ci->mfa.arity);
+ ep->addressv[erts_staging_code_ix()] = erts_codeinfo_to_code(ci);
+ DBG_TRACE_MFA_P(&ci->mfa,"set beam stub at %p in export at %p (code_ix=%d)",
+ erts_codeinfo_to_code(ci), ep, erts_staging_code_ix());
return;
}
}
@@ -6139,16 +6586,14 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
* Search the lambda table to find out which.
*/
-#ifdef HIPE
n = stp->num_lambdas;
for (i = 0, lp = stp->lambdas; i < n; i++, lp++) {
ErlFunEntry* fe = stp->lambdas[i].fe;
- if (lp->function == function && lp->arity == arity) {
- fp[5] = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
- fe->address = &(fp[5]);
+ if (lp->function == ci->mfa.function && lp->arity == ci->mfa.arity) {
+ *erts_codeinfo_to_code(ci) = BeamOpCodeAddr(op_hipe_trap_call_closure);
+ fe->address = erts_codeinfo_to_code(ci);
}
}
-#endif
return;
}
@@ -6157,10 +6602,9 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
[{Adr, Patchtype} | Addresses]
and the address of a fun_entry.
*/
-int
+static int
patch(Eterm Addresses, Uint fe)
{
-#ifdef HIPE
Eterm* listp;
Eterm tuple;
Eterm* tp;
@@ -6196,15 +6640,13 @@ patch(Eterm Addresses, Uint fe)
}
-#endif
return 1;
}
-int
+static int
patch_funentries(Eterm Patchlist)
{
-#ifdef HIPE
while (!is_nil(Patchlist)) {
Eterm Info;
Eterm MFA;
@@ -6278,38 +6720,29 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- /* Deliberate MEMORY LEAK of native fun entries!!!
- *
- * Uncomment line below when hipe code upgrade and purging works correctly.
- * Today we may get cases when old (leaked) native code of a purged module
- * gets called and tries to create instances of a deleted fun entry.
- *
- * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
- *
- * erts_refc_dec(&fe->refc, 1);
- */
+ erts_refc_dec(&fe->refc, 1);
if (!patch(Addresses, (Uint) fe))
return 0;
}
-#endif
return 1; /* Signal that all went well */
}
-
/*
* Do a dummy load of a module. No threaded code will be loaded.
* Used for loading native code.
* Will also patch all references to fun_entries to point to
* the new fun_entries created.
*/
-
Eterm
-erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
+erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info)
{
Binary* magic;
+ Binary* hipe_magic;
LoaderState* stp;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
BeamInstr Funcs;
BeamInstr Patchlist;
Eterm MD5Bin;
@@ -6332,8 +6765,12 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
magic = erts_alloc_loader_state();
stp = ERTS_MAGIC_BIN_DATA(magic);
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
- if (is_not_atom(Mod)) {
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
goto error;
}
if (is_not_tuple(Info)) {
@@ -6361,13 +6798,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Scan the Beam binary and read the interesting sections.
*/
- stp->module = Mod;
+ stp->module = hipe_stp->module;
stp->group_leader = p->group_leader;
stp->num_functions = n;
if (!init_iff_file(stp, bytes, size)) {
goto error;
}
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES) ||
!verify_chunks(stp)) {
goto error;
}
@@ -6375,9 +6812,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (!read_code_header(stp)) {
goto error;
}
- define_file(stp, "atom table", ATOM_CHUNK);
- if (!load_atom_table(stp)) {
- goto error;
+ if (stp->chunks[UTF8_ATOM_CHUNK].size > 0) {
+ define_file(stp, "utf8 atom table", UTF8_ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_UTF8)) {
+ goto error;
+ }
+ } else {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp, ERTS_ATOM_ENC_LATIN1)) {
+ goto error;
+ }
}
define_file(stp, "export table", EXP_CHUNK);
if (!stub_read_export_table(stp)) {
@@ -6469,21 +6913,18 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Set the pointer and make the stub. Put a return instruction
* as the body until we know what kind of trap we should put there.
*/
- code_hdr->functions[i] = fp;
-#ifdef HIPE
- op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
-#else
- op = (Eterm) BeamOpCode(op_move_return_n);
-#endif
- fp = make_stub(fp, Mod, func, arity, (Uint)native_address, op);
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
+ op = BeamOpCodeAddr(op_hipe_trap_call); /* Might be changed later. */
+ fp = make_stub((ErtsCodeInfo*)fp, hipe_stp->module, func, arity,
+ (Uint)native_address, op);
}
/*
* Insert the last pointer and the int_code_end instruction.
*/
- code_hdr->functions[i] = fp;
- *fp++ = (BeamInstr) BeamOp(op_int_code_end);
+ code_hdr->functions[i] = (ErtsCodeInfo*)fp;
+ *fp++ = BeamOpCodeAddr(op_int_code_end);
/*
* Copy attributes and compilation information.
@@ -6515,11 +6956,20 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
}
/*
+ * Initialise HiPE module
+ */
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ /*
* Insert the module in the module table.
*/
- rval = stub_insert_new_code(p, 0, p->group_leader, Mod,
- code_hdr, code_size);
+ rval = stub_insert_new_code(p, 0, p->group_leader, hipe_stp->module,
+ code_hdr, code_size, hipe_code);
if (rval != NIL) {
goto error;
}
@@ -6530,23 +6980,76 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
fp = code_base;
for (i = 0; i < n; i++) {
- stub_final_touch(stp, fp);
+ stub_final_touch(stp, (ErtsCodeInfo*)fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
+ Eterm mod = hipe_stp->module;
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
- return Mod;
+ hipe_free_loader_state(hipe_stp);
+
+ return mod;
}
error:
+ erts_free(ERTS_ALC_T_HIPE_LL, hipe_code);
erts_free_aligned_binary_bytes(temp_alloc);
free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin)
+{
+ Binary* hipe_magic;
+ HipeLoaderState* hipe_stp;
+ HipeModule *hipe_code;
+ Module* modp;
+
+ if (!is_internal_magic_ref(hipe_magic_bin) ||
+ !(hipe_magic = erts_magic_ref2bin(hipe_magic_bin),
+ hipe_stp = hipe_get_loader_state(hipe_magic)) ||
+ hipe_stp->module == NIL || hipe_stp->text_segment == 0) {
+ return 0;
+ }
+
+ modp = erts_get_module(hipe_stp->module, erts_active_code_ix());
+ if (!modp)
+ return 0;
+
+ /*
+ * Initialise HiPE module
+ */
+ hipe_code = erts_alloc(ERTS_ALC_T_HIPE_LL, sizeof(*hipe_code));
+ hipe_code->text_segment = hipe_stp->text_segment;
+ hipe_code->text_segment_size = hipe_stp->text_segment_size;
+ hipe_code->data_segment = hipe_stp->data_segment;
+ hipe_code->first_hipe_ref = hipe_stp->new_hipe_refs;
+ hipe_code->first_hipe_sdesc = hipe_stp->new_hipe_sdesc;
+
+ modp->curr.hipe_code = hipe_code;
+
+ /* Prevent code from being freed */
+ hipe_stp->text_segment = 0;
+ hipe_stp->data_segment = 0;
+ hipe_stp->new_hipe_refs = NULL;
+ hipe_stp->new_hipe_sdesc = NULL;
+
+ hipe_redirect_to_module(modp);
+
+ return 1;
+}
+
#undef WORDS_PER_FUNCTION
+#endif /* HIPE */
+
static int safe_mul(UWord a, UWord b, UWord* resp)
{
@@ -6560,3 +7063,46 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
}
}
+#ifdef ENABLE_DBG_TRACE_MFA
+
+#define MFA_MAX 10
+Eterm dbg_trace_m[MFA_MAX];
+Eterm dbg_trace_f[MFA_MAX];
+Uint dbg_trace_a[MFA_MAX];
+unsigned int dbg_trace_ix = 0;
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a)
+{
+ unsigned i = dbg_trace_ix++;
+ ASSERT(i < MFA_MAX);
+ dbg_trace_m[i] = am_atom_put(m, sys_strlen(m));
+ dbg_trace_f[i] = am_atom_put(f, sys_strlen(f));
+ dbg_trace_a[i] = a;
+}
+
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a)
+{
+ unsigned int i;
+ for (i = 0; i < dbg_trace_ix; ++i) {
+ if (m == dbg_trace_m[i] &&
+ (!f || (f == dbg_trace_f[i] && a == dbg_trace_a[i]))) {
+
+ return i+1;
+ }
+ }
+ return 0;
+}
+
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...)
+{
+ va_list arglist;
+ va_start(arglist, format);
+ ASSERT(--ix < MFA_MAX);
+ erts_fprintf(stderr, "MFA TRACE %T:%T/%u: ",
+ dbg_trace_m[ix], dbg_trace_f[ix], (int)dbg_trace_a[ix]);
+
+ erts_vfprintf(stderr, format, arglist);
+ va_end(arglist);
+}
+
+#endif /* ENABLE_DBG_TRACE_MFA */
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 1200bb9c6f..156c3c45e2 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,20 +35,7 @@ typedef struct gen_op_entry {
int transform;
} GenOpEntry;
-extern GenOpEntry gen_opc[];
-
-#ifdef NO_JUMP_TABLE
-#define BeamOp(Op) (Op)
-#else
-extern void** beam_ops;
-#define BeamOp(Op) beam_ops[(Op)]
-#endif
-
-
-extern BeamInstr beam_debug_apply[];
-extern BeamInstr* em_call_error_handler;
-extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_nif;
+extern const GenOpEntry gen_opc[];
struct ErtsLiteralArea_;
@@ -115,16 +102,20 @@ typedef struct beam_code_header {
* The actual loaded code (for the first function) start just beyond
* this table.
*/
- BeamInstr* functions[1];
+ ErtsCodeInfo* functions[1];
}BeamCodeHeader;
+# define BEAM_NIF_MIN_FUNC_SZ 4
+
void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
int erts_is_module_native(BeamCodeHeader* code);
+int erts_is_function_native(ErtsCodeInfo*);
void erts_beam_bif_load_init(void);
struct erl_fun_entry;
void erts_purge_state_add_fun(struct erl_fun_entry *fe);
-Export *erts_suspend_process_on_pending_purge_lambda(Process *c_p);
+Export *erts_suspend_process_on_pending_purge_lambda(Process *c_p,
+ struct erl_fun_entry*);
/*
* Layout of the line table.
@@ -151,4 +142,34 @@ struct BeamCodeLineTab_ {
#define LOC_FILE(Loc) ((Loc) >> 24)
#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
+/*
+ * MFA event debug "tracing" usage:
+ *
+ * #define ENABLE_DBG_TRACE_MFA
+ * and call dbg_set_traced_mfa("mymod","myfunc",arity) from some
+ * init function, once for each function to trace.
+ *
+ * Run and get stderr printouts when interesting things happen to your MFA.
+ */
+#ifdef ENABLE_DBG_TRACE_MFA
+
+void dbg_set_traced_mfa(const char* m, const char* f, Uint a);
+int dbg_is_traced_mfa(Eterm m, Eterm f, Uint a);
+void dbg_vtrace_mfa(unsigned ix, const char* format, ...);
+#define DBG_TRACE_MFA(M,F,A,FMT, ...) do {\
+ unsigned ix;\
+ if ((ix=dbg_is_traced_mfa(M,F,A))) \
+ dbg_vtrace_mfa(ix, FMT"\n", ##__VA_ARGS__);\
+ }while(0)
+
+#define DBG_TRACE_MFA_P(MFA, FMT, ...) \
+ DBG_TRACE_MFA((MFA)->module, (MFA)->function, (MFA)->arity, FMT, ##__VA_ARGS__)
+
+#else
+# define dbg_set_traced_mfa(M,F,A)
+# define DBG_TRACE_MFA(M,F,A,FMT, ...)
+# define DBG_TRACE_MFA_P(MFA,FMT, ...)
+#endif /* ENABLE_DBG_TRACE_MFA */
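/*
 * Editor's note: a minimal sketch of the usage described above; the
 * module, function, arity and format arguments are illustrative.
 *
 *   dbg_set_traced_mfa("lists", "reverse", 2);
 *   ...
 *   DBG_TRACE_MFA(mod, func, arity, "loaded at %p", code_ptr);
 */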
+
#endif /* _BEAM_LOAD_H */
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
index 55342a38c6..9f3153724a 100644
--- a/erts/emulator/beam/beam_ranges.c
+++ b/erts/emulator/beam/beam_ranges.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,15 +26,23 @@
#include "erl_vm.h"
#include "global.h"
#include "beam_load.h"
+#include "erl_unicode.h"
typedef struct {
BeamInstr* start; /* Pointer to start of module. */
- erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
+ erts_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
} Range;
+/*
+ * Used for crash dumping of literals. The size of erts_dump_lit_areas is
+ * always at least the number of active ranges.
+ */
+ErtsLiteralArea** erts_dump_lit_areas;
+Uint erts_dump_num_lit_areas;
+
/* Range 'end' needs to be atomic as we purge a module
   by setting end=start in the active code_ix */
-#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end))
+#define RANGE_END(R) ((BeamInstr*)erts_atomic_read_nob(&(R)->end))
static Range* find_range(BeamInstr* pc);
static void lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
@@ -49,10 +57,10 @@ struct ranges {
Range* modules; /* Sorted lists of module addresses. */
Sint n; /* Number of range entries. */
Sint allocated; /* Number of allocated entries. */
- erts_smp_atomic_t mid; /* Cached search start point */
+ erts_atomic_t mid; /* Cached search start point */
};
static struct ranges r[ERTS_NUM_CODE_IX];
-static erts_smp_atomic_t mem_used;
+static erts_atomic_t mem_used;
static Range* write_ptr;
#ifdef HARD_DEBUG
@@ -90,13 +98,18 @@ erts_init_ranges(void)
{
Sint i;
- erts_smp_atomic_init_nob(&mem_used, 0);
+ erts_atomic_init_nob(&mem_used, 0);
for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
r[i].modules = 0;
r[i].n = 0;
r[i].allocated = 0;
- erts_smp_atomic_init_nob(&r[i].mid, 0);
+ erts_atomic_init_nob(&r[i].mid, 0);
}
+
+ erts_dump_num_lit_areas = 8;
+ erts_dump_lit_areas = (ErtsLiteralArea **)
+ erts_alloc(ERTS_ALC_T_CRASH_DUMP,
+ erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
}
void
@@ -107,12 +120,12 @@ erts_start_staging_ranges(int num_new)
Sint need;
if (r[dst].modules) {
- erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
+ erts_atomic_add_nob(&mem_used, -r[dst].allocated);
erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules);
}
need = r[dst].allocated = r[src].n + num_new;
- erts_smp_atomic_add_nob(&mem_used, need);
+ erts_atomic_add_nob(&mem_used, need);
write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS,
need * sizeof(Range));
r[dst].modules = write_ptr;
@@ -135,7 +148,7 @@ erts_end_staging_ranges(int commit)
if (rp->start < RANGE_END(rp)) {
/* Only insert a module that has not been purged. */
write_ptr->start = rp->start;
- erts_smp_atomic_init_nob(&write_ptr->end,
+ erts_atomic_init_nob(&write_ptr->end,
(erts_aint_t)(RANGE_END(rp)));
write_ptr++;
}
@@ -161,9 +174,17 @@ erts_end_staging_ranges(int commit)
}
r[dst].modules = mp;
CHECK(&r[dst]);
- erts_smp_atomic_set_nob(&r[dst].mid,
+ erts_atomic_set_nob(&r[dst].mid,
(erts_aint_t) (r[dst].modules +
r[dst].n / 2));
+
+ if (r[dst].allocated > erts_dump_num_lit_areas) {
+ erts_dump_num_lit_areas = r[dst].allocated * 2;
+ erts_dump_lit_areas = (ErtsLiteralArea **)
+ erts_realloc(ERTS_ALC_T_CRASH_DUMP,
+ (void *) erts_dump_lit_areas,
+ erts_dump_num_lit_areas * sizeof(ErtsLiteralArea*));
+ }
}
}
@@ -182,7 +203,7 @@ erts_update_ranges(BeamInstr* code, Uint size)
*/
if (r[dst].modules == NULL) {
Sint need = 128;
- erts_smp_atomic_add_nob(&mem_used, need);
+ erts_atomic_add_nob(&mem_used, need);
r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
need * sizeof(Range));
r[dst].allocated = need;
@@ -192,7 +213,7 @@ erts_update_ranges(BeamInstr* code, Uint size)
ASSERT(r[dst].modules);
write_ptr->start = code;
- erts_smp_atomic_init_nob(&(write_ptr->end),
+ erts_atomic_init_nob(&(write_ptr->end),
(erts_aint_t)(((byte *)code) + size));
write_ptr++;
}
@@ -201,13 +222,13 @@ void
erts_remove_from_ranges(BeamInstr* code)
{
Range* rp = find_range(code);
- erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
+ erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
}
UWord
erts_ranges_sz(void)
{
- return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range);
+ return erts_atomic_read_nob(&mem_used) * sizeof(Range);
}
/*
@@ -221,13 +242,13 @@ erts_ranges_sz(void)
void
erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
- BeamInstr** low;
- BeamInstr** high;
- BeamInstr** mid;
+ ErtsCodeInfo** low;
+ ErtsCodeInfo** high;
+ ErtsCodeInfo** mid;
Range* rp;
BeamCodeHeader* hdr;
- fi->current = NULL;
+ fi->mfa = NULL;
fi->needed = 5;
fi->loc = LINE_INVALID_LOCATION;
rp = find_range(pc);
@@ -240,12 +261,12 @@ erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
high = low + hdr->num_functions;
while (low < high) {
mid = low + (high-low) / 2;
- if (pc < mid[0]) {
+ if (pc < (BeamInstr*)(mid[0])) {
high = mid;
- } else if (pc < mid[1]) {
- fi->current = mid[0]+2;
+ } else if (pc < (BeamInstr*)(mid[1])) {
+ fi->mfa = &mid[0]->mfa;
if (full_info) {
- BeamInstr** fp = hdr->functions;
+ ErtsCodeInfo** fp = hdr->functions;
int idx = mid - fp;
lookup_loc(fi, pc, hdr, idx);
}
@@ -262,7 +283,7 @@ find_range(BeamInstr* pc)
ErtsCodeIndex active = erts_active_code_ix();
Range* low = r[active].modules;
Range* high = low + r[active].n;
- Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid);
+ Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid);
CHECK(&r[active]);
while (low < high) {
@@ -271,7 +292,7 @@ find_range(BeamInstr* pc)
} else if (pc >= RANGE_END(mid)) {
low = mid + 1;
} else {
- erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
+ erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
return mid;
}
mid = low + (high-low) / 2;
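/*
 * Editor's note: find_range() is a plain binary search except that it
 * starts from (and stores back) the last hit via the no-barrier atomic
 * r[active].mid seen above, which makes repeated lookups landing in the
 * same module effectively O(1) while staying O(log n) otherwise.
 */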
@@ -316,11 +337,10 @@ lookup_loc(FunctionInfo* fi, const BeamInstr* pc,
file = LOC_FILE(fi->loc);
if (file == 0) {
/* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ Atom* mod_atom = atom_tab(atom_val(fi->mfa->module));
fi->needed += 2*(mod_atom->len+4);
} else {
- Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
- fi->needed += 2*ap->len;
+ fi->needed += 2*erts_atom_to_string_length((fi->fname_ptr)[file-1]);
}
return;
} else {
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index d9048065c8..015c051cc1 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,24 +46,23 @@
#include "erl_bif_unique.h"
#include "erl_map.h"
#include "erl_msacc.h"
+#include "erl_proc_sig_queue.h"
Export *erts_await_result;
+static Export await_exit_trap;
static Export* flush_monitor_messages_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
-static Export* await_proc_exit_trap = NULL;
static Export* await_port_send_result_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
static Export dsend_continue_trap_export;
Export *erts_convert_time_unit_trap = NULL;
static Export *await_msacc_mod_trap = NULL;
-static erts_smp_atomic32_t msacc;
+static erts_atomic32_t msacc;
+static Export *system_flag_scheduler_wall_time_trap;
static Export *await_sched_wall_time_mod_trap;
-static erts_smp_atomic32_t sched_wall_time;
-
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
+static erts_atomic32_t sched_wall_time;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -94,85 +93,51 @@ BIF_RETTYPE spawn_3(BIF_ALIST_3)
/* Utility to add a new link between processes p and another internal
* process (rpid). Process p must be the currently executing process.
*/
-static int insert_internal_link(Process* p, Eterm rpid)
-{
- Process *rp;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
-
- ASSERT(is_internal_pid(rpid));
-
-#ifdef ERTS_SMP
- if (IS_TRACED(p)
- && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
- rp_locks = ERTS_PROC_LOCKS_ALL;
- }
-
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
-#endif
-
- /* get a pointer to the process struct of the linked process */
- rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- rpid, rp_locks,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
-
- if (!rp) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- return 0;
- }
-
- if (p != rp) {
- erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
- erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
-
- ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
-
- if (IS_TRACED(p)) {
- if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
- ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
- erts_tracer_replace(&rp->common, ERTS_TRACER(p));
- if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
- ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- }
- }
- }
- }
- if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, p == rp ? rp_locks : ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- rp, am_getting_linked, p->common.id);
-
- if (p == rp)
- erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
- else {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_proc_unlock(rp, rp_locks);
- }
-
- return 1;
-}
-
/* create a link to the process */
BIF_RETTYPE link_1(BIF_ALIST_1)
{
- DistEntry *dep;
-
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P, am_link, BIF_ARG_1);
}
/* check that the pid or port which is our argument is OK */
if (is_internal_pid(BIF_ARG_1)) {
- if (insert_internal_link(BIF_P, BIF_ARG_1)) {
- BIF_RET(am_true);
- }
- else {
- goto res_no_proc;
- }
+ int created;
+ ErtsLinkData *ldp;
+ ErtsLink *lnk;
+
+ if (BIF_P->common.id == BIF_ARG_1)
+ BIF_RET(am_true);
+
+ if (!erts_proc_lookup(BIF_ARG_1))
+ goto res_no_proc;
+
+ lnk = erts_link_tree_lookup_create(&ERTS_P_LINKS(BIF_P),
+ &created,
+ ERTS_LNK_TYPE_PROC,
+ BIF_P->common.id,
+ BIF_ARG_1);
+ if (!created)
+ BIF_RET(am_true);
+
+ ldp = erts_link_to_data(lnk);
+
+
+ if (erts_proc_sig_send_link(BIF_P, BIF_ARG_1, &ldp->b))
+ BIF_RET(am_true);
+
+ erts_link_tree_delete(&ERTS_P_LINKS(BIF_P), lnk);
+ erts_link_release_both(ldp);
+ goto res_no_proc;
}
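/*
 * Editor's note: the new local-link flow above, in steps. No lock on
 * the target process is taken; linking is now signal-based:
 *   1. bail out early on self-links and dead targets;
 *   2. look up or create the link in the caller's own link tree
 *      (an existing entry means the link is already set up);
 *   3. send an asynchronous link signal to the target, and on
 *      delivery failure undo the tree insert and report noproc.
 */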
if (is_internal_port(BIF_ARG_1)) {
- int send_link_signal = 0;
+ int created;
+ ErtsLinkData *ldp;
+ ErtsLink *lnk;
+ Eterm ref;
+ Eterm *refp;
Port *prt = erts_port_lookup(BIF_ARG_1,
(erts_port_synchronous_ops
? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
@@ -181,31 +146,31 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
goto res_no_proc;
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
-
- if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0)
- send_link_signal = 1;
- /* else: already linked */
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ lnk = erts_link_tree_lookup_create(&ERTS_P_LINKS(BIF_P),
+ &created,
+ ERTS_LNK_TYPE_PORT,
+ BIF_P->common.id,
+ BIF_ARG_1);
+ if (!created)
+ BIF_RET(am_true);
- if (send_link_signal) {
- Eterm ref;
- Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+ ldp = erts_link_to_data(lnk);
+ refp = erts_port_synchronous_ops ? &ref : NULL;
- switch (erts_port_link(BIF_P, prt, BIF_P->common.id, refp)) {
- case ERTS_PORT_OP_DROPPED:
- case ERTS_PORT_OP_BADARG:
- goto res_no_proc;
- case ERTS_PORT_OP_SCHEDULED:
- if (refp) {
- ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
- }
- default:
- break;
- }
- }
+ switch (erts_port_link(BIF_P, prt, &ldp->b, refp)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ erts_link_tree_delete(&ERTS_P_LINKS(BIF_P), lnk);
+ erts_link_release_both(ldp);
+ goto res_no_proc;
+ case ERTS_PORT_OP_SCHEDULED:
+ if (refp) {
+ ASSERT(is_internal_ordinary_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+ default:
+ break;
+ }
BIF_RET(am_true);
}
else if (is_external_port(BIF_ARG_1)
@@ -214,342 +179,203 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
}
if (is_external_pid(BIF_ARG_1)) {
+ ErtsLinkData *ldp;
+ int created;
+ DistEntry *dep;
+ ErtsLink *lnk;
+ int code;
+ ErtsDSigData dsd;
+
+ dep = external_pid_dist_entry(BIF_ARG_1);
+ if (dep == erts_this_dist_entry)
+ goto res_no_proc;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
-
- /* We may earn time by checking first that we're not linked already */
- if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- BIF_RET(am_true);
- }
- else {
- ErtsLink *lnk;
- int code;
- ErtsDSigData dsd;
- dep = external_pid_dist_entry(BIF_ARG_1);
- if (dep == erts_this_dist_entry) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- goto res_no_proc;
- }
-
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_RLOCK, 0);
- switch (code) {
- case ERTS_DSIG_PREP_NOT_ALIVE:
- /* Let the dlink trap handle it */
- case ERTS_DSIG_PREP_NOT_CONNECTED:
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1);
-
- case ERTS_DSIG_PREP_CONNECTED:
- /* We are connected. Setup link and send link signal */
-
- erts_smp_de_links_lock(dep);
-
- erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1);
- lnk = erts_add_or_lookup_link(&(dep->nlinks),
- LINK_PID,
- BIF_P->common.id);
- ASSERT(lnk != NULL);
- erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1);
-
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
-
- code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
- if (code == ERTS_DSIG_SEND_YIELD)
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- BIF_RET(am_true);
- default:
- ASSERT(! "Invalid dsig prepare result");
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- }
- }
+ lnk = erts_link_tree_lookup_create(&ERTS_P_LINKS(BIF_P),
+ &created,
+ ERTS_LNK_TYPE_DIST_PROC,
+ BIF_P->common.id,
+ BIF_ARG_1);
+
+ if (!created)
+ BIF_RET(am_true); /* Already present... */
+
+ ldp = erts_link_to_data(lnk);
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_RLOCK, 0, 1);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ erts_link_set_dead_dist(&ldp->b, dep->sysname);
+ erts_proc_sig_send_link_exit(NULL, BIF_ARG_1, &ldp->b,
+ am_noconnection, NIL);
+ BIF_RET(am_true);
+
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED: {
+ /*
+             * We have a (pending) connection.
+             * Set up the link and enqueue the link signal.
+ */
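+            /* In debug builds, record the insert result so the
+             * ASSERT below can verify that our link half really
+             * went into the dist entry's link list. */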
+#ifdef DEBUG
+ int inserted =
+#endif
+ erts_link_dist_insert(&ldp->b, dep->mld);
+ ASSERT(inserted);
+ erts_de_runlock(dep);
+
+ code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ BIF_RET(am_true);
+ break;
+ }
+ default:
+ ERTS_ASSERT(! "Invalid dsig prepare result");
+ }
}
BIF_ERROR(BIF_P, BADARG);
-res_no_proc: {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state);
- if (state & ERTS_PSFLG_TRAP_EXIT) {
- ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
- erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
- erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
- BIF_RET(am_true);
- }
- else
- BIF_ERROR(BIF_P, EXC_NOPROC);
+res_no_proc:
+ if (BIF_P->flags & F_TRAP_EXIT) {
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
+ erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
+ erts_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
+ BIF_RET(am_true);
+ }
+ else {
+ /*
+ * This behaviour is *really* sad but link/1 has
+ * behaved like this for ages (and this behaviour is
+ * actually documented)... :'-(
+ *
+         * The proper behaviour would have been to
+         * send the calling process an exit signal...
+ */
+ BIF_ERROR(BIF_P, EXC_NOPROC);
}
}
-/* This function is allowed to return range of values handled by demonitor/1-2
- * Namely: atoms true, false, yield, internal_error, badarg or THE_NON_VALUE
- */
static Eterm
-remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
+demonitor(Process *c_p, Eterm ref, Eterm *multip)
{
- ErtsDSigData dsd;
- ErtsMonitor *dmon;
- ErtsMonitor *mon;
- int code;
- Eterm res = am_false;
-#ifndef ERTS_SMP
- int stale_mon = 0;
-#endif
+ ErtsMonitor *mon; /* The monitor entry to delete */
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK)
- == erts_proc_lc_my_proc_locks(c_p));
+ *multip = am_false;
- code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0);
- switch (code) {
- case ERTS_DSIG_PREP_NOT_ALIVE:
- case ERTS_DSIG_PREP_NOT_CONNECTED:
-#ifndef ERTS_SMP
- /* XXX Is this possible? Shouldn't this link
- previously have been removed if the node
- had previously been disconnected. */
- ASSERT(0);
- stale_mon = 1;
-#endif
- /*
- * In the smp case this is possible if the node goes
- * down just before the call to demonitor.
- */
- if (dep) {
- erts_smp_de_links_lock(dep);
- dmon = erts_remove_monitor(&dep->monitors, ref);
- erts_smp_de_links_unlock(dep);
- if (dmon)
- erts_destroy_monitor(dmon);
- }
- mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
-
- res = am_true;
- break;
-
- case ERTS_DSIG_PREP_CONNECTED:
-
- erts_smp_de_links_lock(dep);
- mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
- dmon = erts_remove_monitor(&dep->monitors, ref);
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
-
- if (!dmon) {
-#ifndef ERTS_SMP
- /* XXX How is this possible? Shouldn't this link
- previously have been removed when the distributed
- end was removed. */
- ASSERT(0);
- stale_mon = 1;
-#endif
- /*
- * This is possible when smp support is enabled.
- * 'DOWN' message just arrived.
- */
- res = am_true;
- }
- else {
- /*
- * Soft (no force) send, use ->data in dist slot
- * monitor list since in case of monitor name
- * the atom is stored there. Yield if necessary.
- */
- code = erts_dsig_send_demonitor(&dsd,
- c_p->common.id,
- (mon->name != NIL
- ? mon->name
- : mon->pid),
- ref,
- 0);
- res = (code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true);
- erts_destroy_monitor(dmon);
- }
- break;
- default:
- ASSERT(! "Invalid dsig prepare result");
- return am_internal_error;
- }
-
-#ifndef ERTS_SMP
- if (stale_mon) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref);
- if (is_atom(to))
- erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname);
- else
- erts_dsprintf(dsbufp, "%T", to);
- erts_dsprintf(dsbufp, " found\n");
- erts_send_error_to_logger(c_p->group_leader, dsbufp);
- }
-#endif
-
- /*
- * We aren't allowed to destroy 'mon' until now, since 'to'
- * may refer into 'mon' (external pid).
- */
- ASSERT(mon); /* Since link lock wasn't released between
- lookup and remove */
- erts_destroy_monitor(mon);
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
- return res;
-}
+ if (is_not_internal_ref(ref)) {
+ if (is_external_ref(ref)
+ && (erts_this_dist_entry
+ == external_ref_dist_entry(ref))) {
+ return am_false;
+ }
+        return am_badarg; /* Not a reference that can identify one of our monitors */
+ }
-static ERTS_INLINE void
-demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res)
-{
- Process *rp = erts_pid2proc_opt(c_p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- to,
- ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
-
-#ifndef ERTS_SMP
- ASSERT(mon);
-#else
- if (!mon)
- *res = am_false;
- else
-#endif
- {
- *res = am_true;
- erts_destroy_monitor(mon);
- }
- if (rp) {
- ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- if (rp != c_p)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon != NULL)
- erts_destroy_monitor(rmon);
- }
- else {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
- }
-}
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), ref);
+ if (!mon)
+ return am_false;
-static ERTS_INLINE BIF_RETTYPE
-demonitor_local_port(Process *origin, Eterm ref, Eterm target)
-{
- BIF_RETTYPE res = am_false;
- Port *port = erts_port_lookup_raw(target);
+ if (!erts_monitor_is_origin(mon))
+ return am_badarg;
- if (!port) {
- BIF_ERROR(origin, BADARG);
- }
- erts_smp_proc_unlock(origin, ERTS_PROC_LOCK_LINK);
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), mon);
- if (port) {
- Eterm trap_ref;
- switch (erts_port_demonitor(origin, ERTS_PORT_DEMONITOR_NORMAL,
- port, ref, &trap_ref)) {
- case ERTS_PORT_OP_DROPPED:
- case ERTS_PORT_OP_BADARG:
- break;
- case ERTS_PORT_OP_SCHEDULED:
- BIF_TRAP3(await_port_send_result_trap, origin, trap_ref,
- am_busy_port, am_true);
- /* the busy_port atom will never be returned, because it cannot be
- * returned from erts_port_(de)monitor, but just in case if in future
- * internal API changes - you may see this atom */
- default:
- break;
- }
- }
- else {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(origin);
- }
- BIF_RET(res);
-}
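+    /* The monitor is ours and still active; dispatch on its
+     * type to carry out the demonitor operation. */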
+ switch (mon->type) {
-/* Can return atom true, false, yield, internal_error, badarg or
- * THE_NON_VALUE if error occured or trap has been set up
- */
-static
-BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
-{
- ErtsMonitor *mon = NULL; /* The monitor entry to delete */
-    Eterm to = NIL;             /* Monitor link target */
- DistEntry *dep = NULL; /* Target's distribution entry */
- int deref_de = 0;
- BIF_RETTYPE res = am_false;
- int unlock_link = 1;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ *multip = am_true;
+ erts_demonitor_time_offset(mon);
+ return am_true;
+
+ case ERTS_MON_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(mon->other.item));
+ prt = erts_port_lookup(mon->other.item, ERTS_PORT_SFLGS_DEAD);
+ if (!prt || erts_port_demonitor(c_p, prt, mon) == ERTS_PORT_OP_DROPPED)
+ erts_monitor_release(mon);
+ return am_true;
+ }
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
+ case ERTS_MON_TYPE_PROC:
+ erts_proc_sig_send_demonitor(mon);
+ return am_true;
- if (is_not_internal_ref(ref)) {
- res = am_badarg;
- goto done; /* Cannot be this monitor's ref */
- }
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ Eterm to = mon->other.item;
+ DistEntry *dep;
+ int code = ERTS_DSIG_SEND_OK;
+ int deleted;
+ ErtsDSigData dsd;
- mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref);
- if (!mon) {
- goto done;
- }
+ ASSERT(is_external_pid(to) || is_node_name_atom(to));
- switch (mon->type) {
- case MON_TIME_OFFSET:
- *multip = am_true;
- erts_demonitor_time_offset(ref);
- res = am_true;
- break;
- case MON_ORIGIN:
- to = mon->pid;
- *multip = am_false;
- if (is_atom(to)) {
+ if (is_external_pid(to))
+ dep = external_pid_dist_entry(to);
+ else {
/* Monitoring a name at node to */
- ASSERT(is_node_name_atom(to));
dep = erts_sysname_to_connected_dist_entry(to);
ASSERT(dep != erts_this_dist_entry);
- if (dep)
- deref_de = 1;
- } else if (is_port(to)) {
- if (port_dist_entry(to) != erts_this_dist_entry) {
- goto badarg;
+ if (!dep) {
+ erts_monitor_release(mon);
+ return am_false;
}
- res = demonitor_local_port(c_p, ref, to);
- unlock_link = 0;
- goto done;
- } else {
- ASSERT(is_pid(to));
- dep = pid_dist_entry(to);
}
- if (dep != erts_this_dist_entry) {
- res = remote_demonitor(c_p, dep, ref, to);
- /* remote_demonitor() unlocks link lock on c_p */
- unlock_link = 0;
+
+ code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_RLOCK, 0, 0);
+
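+        /* Unlink the target half from the dist entry's monitor
+         * list; 'deleted' tells us whether we now own that half
+         * and must release it below. */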
+ deleted = erts_monitor_dist_delete(&mdp->target);
+
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ /*
+ * In the smp case this is possible if the node goes
+ * down just before the call to demonitor.
+ */
+ break;
+
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED: {
+ Eterm watched;
+
+ erts_de_runlock(dep);
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = to;
+
+ /*
+             * Soft (no force) send; 'watched' is the registered
+             * name when the monitor was set up by name, otherwise
+             * the monitored id. Yield if necessary.
+ */
+ code = erts_dsig_send_demonitor(&dsd, c_p->common.id,
+ watched, mdp->ref, 0);
+ break;
}
- else { /* Local monitor */
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
- }
- dep = NULL;
- demonitor_local_process(c_p, ref, to, &res);
+
+ default:
+ ERTS_INTERNAL_ERROR("invalid result from erts_dsig_prepare()");
+ break;
}
- break;
- default /* case */ :
-badarg:
- res = am_badarg; /* will be converted to error by caller */
- *multip = am_false;
- break;
- }
-done:
- if (unlock_link)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+ if (deleted)
+ erts_monitor_release(&mdp->target);
- if (deref_de) {
- ASSERT(dep);
- erts_deref_dist_entry(dep);
+ erts_monitor_release(mon);
+ return code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true;
}
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
- BIF_RET(res);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected monitor type");
+ return am_false;
+ }
}
BIF_RETTYPE demonitor_1(BIF_ALIST_1)
@@ -557,25 +383,23 @@ BIF_RETTYPE demonitor_1(BIF_ALIST_1)
Eterm multi;
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
case am_false:
- case am_true: BIF_RET(am_true);
- case THE_NON_VALUE: BIF_RET(THE_NON_VALUE);
- case am_yield: ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- case am_badarg: BIF_ERROR(BIF_P, BADARG);
-
- case am_internal_error:
+ case am_true:
+ BIF_RET(am_true);
+ case am_yield:
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ case am_badarg:
default:
- ASSERT(! "demonitor(): internal error");
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ BIF_ERROR(BIF_P, BADARG);
}
}
BIF_RETTYPE demonitor_2(BIF_ALIST_2)
{
- BIF_RETTYPE res = am_true;
- Eterm multi = am_false;
- int info = 0;
- int flush = 0;
- Eterm list = BIF_ARG_2;
+ BIF_RETTYPE res;
+ Eterm multi = am_false;
+ int info = 0;
+ int flush = 0;
+ Eterm list = BIF_ARG_2;
while (is_list(list)) {
Eterm* consp = list_val(list);
@@ -595,10 +419,9 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
if (is_not_nil(list))
goto badarg;
+ res = am_true;
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
- case THE_NON_VALUE:
- /* If other error occured or trap has been set up - pass through */
- BIF_RET(THE_NON_VALUE);
+
case am_false:
if (info)
res = am_false;
@@ -607,20 +430,29 @@ flush_messages:
BIF_TRAP3(flush_monitor_messages_trap, BIF_P,
BIF_ARG_1, multi, res);
}
+ /* Fall through... */
+
case am_true:
if (multi == am_true && flush)
goto flush_messages;
BIF_RET(res);
+
case am_yield:
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ /* return true after yield... */
+ if (flush) {
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ goto flush_messages;
+ }
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+
case am_badarg:
-badarg:
- BIF_ERROR(BIF_P, BADARG);
- case am_internal_error:
default:
- ASSERT(! "demonitor(): internal error");
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+
}
+
+badarg:
+ BIF_ERROR(BIF_P, BADARG);
}
/* Type must be atomic object! */
@@ -660,305 +492,212 @@ erts_queue_monitor_message(Process *p,
erts_queue_message(p, *p_locksp, msgp, tup, am_system);
}
-static Eterm
-local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean)
+BIF_RETTYPE monitor_2(BIF_ALIST_2)
{
- Eterm ret = mon_ref;
- Process *rp;
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
-
- if (target == p->common.id) {
- return ret;
- }
-
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- rp = erts_pid2proc_opt(p, p_locks,
- target, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- p_locks &= ~ERTS_PROC_LOCK_LINK;
- if (boolean)
- ret = am_false;
- else
- erts_queue_monitor_message(p, &p_locks,
- mon_ref, am_process, target, am_noproc);
- }
- else {
- ASSERT(rp != p);
+ Eterm target = BIF_ARG_2;
+ Eterm tmp_heap[3];
+ Eterm ref, id, name;
+ ErtsMonitorData *mdp;
+
+ if (BIF_ARG_1 == am_process) {
+ DistEntry *dep;
+ int byname;
+
+ if (is_internal_pid(target)) {
+ name = NIL;
+ id = target;
+
+ local_process:
+
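+        /* Monitoring ourselves is a no-op apart from the fresh
+         * reference; otherwise create the monitor, insert the
+         * origin half in our tree, and signal the target,
+         * delivering an immediate 'DOWN' if it is gone. */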
+ ref = erts_make_ref(BIF_P);
+ if (id != BIF_P->common.id) {
+ mdp = erts_monitor_create(ERTS_MON_TYPE_PROC,
+ ref, BIF_P->common.id,
+ id, name);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P),
+ &mdp->origin);
+
+ if (!erts_proc_sig_send_monitor(&mdp->target, id))
+ erts_proc_sig_send_monitor_down(&mdp->target,
+ am_noproc);
+ }
+ BIF_RET(ref);
+ }
- if (boolean)
- ret = am_true;
+ if (is_atom(target)) {
+ local_named_process:
+ name = target;
+ id = erts_whereis_name_to_id(BIF_P, target);
+ if (is_internal_pid(id))
+ goto local_process;
+ target = TUPLE2(&tmp_heap[0], name,
+ erts_this_dist_entry->sysname);
+ goto noproc;
+ }
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL);
+ if (is_external_pid(target)) {
+ ErtsDSigData dsd;
+ int code;
+
+ dep = external_pid_dist_entry(target);
+ if (dep == erts_this_dist_entry)
+ goto noproc;
+
+ id = target;
+ name = NIL;
+ byname = 0;
+
+ remote_process:
+
+ ref = erts_make_ref(BIF_P);
+ mdp = erts_monitor_create(ERTS_MON_TYPE_DIST_PROC, ref,
+ BIF_P->common.id, id, name);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
+
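+            /* The origin half is inserted locally; then the
+             * monitor signal is delivered over the connection,
+             * or a 'DOWN' with reason 'noconnection' is faked
+             * if the node is unreachable. */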
+ code = erts_dsig_prepare(&dsd, dep,
+ BIF_P, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_RLOCK, 0, 1);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ erts_monitor_set_dead_dist(&mdp->target, dep->sysname);
+ erts_proc_sig_send_monitor_down(&mdp->target, am_noconnection);
+ code = ERTS_DSIG_SEND_OK;
+ break;
+
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED: {
+#ifdef DEBUG
+ int inserted =
+#endif
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- }
+ erts_monitor_dist_insert(&mdp->target, dep->mld);
+ ASSERT(inserted);
+ erts_de_runlock(dep);
- erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ code = erts_dsig_send_monitor(&dsd, BIF_P->common.id, target, ref);
+ break;
+ }
- return ret;
-}
+ default:
+ ERTS_ASSERT(! "Invalid dsig prepare result");
+ code = ERTS_DSIG_SEND_OK;
+ break;
+ }
-static BIF_RETTYPE
-local_port_monitor(Process *origin, Eterm target)
-{
- BIF_RETTYPE ref = erts_make_ref(origin);
- Port *port = erts_sig_lookup_port(origin, target);
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN;
+ if (byname)
+ erts_deref_dist_entry(dep);
- if (!port) {
-res_no_proc:
- /* Send the DOWN message immediately. Ref is made on the fly because
- * caller has never seen it yet. */
- erts_queue_monitor_message(origin, &p_locks, ref,
- am_port, target, am_noproc);
- }
- else {
- switch (erts_port_monitor(origin, port, target, &ref)) {
- case ERTS_PORT_OP_DROPPED:
- case ERTS_PORT_OP_BADARG:
- goto res_no_proc;
- case ERTS_PORT_OP_SCHEDULED:
- BIF_TRAP3(await_port_send_result_trap, origin, ref,
- am_busy_port, ref);
- /* the busy_port atom will never be returned, because it cannot be
- * returned from erts_port_monitor, but just in case if in future
- * internal API changes - you may see this atom */
- default:
- break;
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, ref);
+ BIF_RET(ref);
}
- }
- erts_smp_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN);
- BIF_RET(ref);
-}
-/* Type = process | port :: atom(), 1st argument passed to erlang:monitor/2
- */
-static BIF_RETTYPE
-local_name_monitor(Process *self, Eterm type, Eterm target_name)
-{
- BIF_RETTYPE ret = erts_make_ref(self);
+ if (is_tuple(target)) {
+ Eterm *tpl = tuple_val(target);
+ if (arityval(tpl[0]) != 2)
+ goto badarg;
+ if (is_not_atom(tpl[1]) || is_not_atom(tpl[2]))
+ goto badarg;
+ if (!erts_is_alive && tpl[2] != am_Noname)
+ goto badarg;
+ target = tpl[1];
+ dep = erts_find_or_insert_dist_entry(tpl[2]);
+ if (dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ goto local_named_process;
+ }
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK;
- Process *proc = NULL;
- Port *port = NULL;
+ id = dep->sysname;
+ name = target;
+ byname = 1;
+ goto remote_process;
+ }
- erts_smp_proc_lock(self, ERTS_PROC_LOCK_LINK);
+ /* badarg... */
+ }
+ else if (BIF_ARG_1 == am_port) {
+
+ if (is_internal_port(target)) {
+ Port *prt;
+ name = NIL;
+ id = target;
+ local_port:
+ ref = erts_make_ref(BIF_P);
+ mdp = erts_monitor_create(ERTS_MON_TYPE_PORT, ref,
+ BIF_P->common.id, id, name);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
+ prt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (!prt || erts_port_monitor(BIF_P, prt, &mdp->target) == ERTS_PORT_OP_DROPPED)
+ erts_proc_sig_send_monitor_down(&mdp->target, am_noproc);
+ BIF_RET(ref);
+ }
- erts_whereis_name(self, p_locks, target_name,
- &proc, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X,
- &port, 0);
+ if (is_atom(target)) {
+ local_named_port:
+ name = target;
+ id = erts_whereis_name_to_id(BIF_P, target);
+ if (is_internal_port(id))
+ goto local_port;
+ target = TUPLE2(&tmp_heap[0], name,
+ erts_this_dist_entry->sysname);
+ goto noproc;
+ }
- /* If the name is not registered,
- * or if we asked for proc and got a port,
- * or if we asked for port and got a proc,
- * we just send the 'DOWN' message.
- */
- if ((!proc && !port) ||
- (type == am_process && port) ||
- (type == am_port && proc)) {
- DeclareTmpHeap(lhp,3,self);
- Eterm item;
- UseTmpHeap(3,self);
-
- erts_smp_proc_unlock(self, ERTS_PROC_LOCK_LINK);
- p_locks &= ~ERTS_PROC_LOCK_LINK;
-
- item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname);
- erts_queue_monitor_message(self, &p_locks,
- ret,
- type, /* = process|port :: atom() */
- item, am_noproc);
- UnUseTmpHeap(3,self);
- }
- else if (port) {
- erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
- p_locks &= ~ERTS_PROC_LOCK_MAIN;
-
- switch (erts_port_monitor(self, port, target_name, &ret)) {
- case ERTS_PORT_OP_DONE:
- return ret;
- case ERTS_PORT_OP_SCHEDULED: { /* Scheduled a signal */
- ASSERT(is_internal_ref(ret));
- BIF_TRAP3(await_port_send_result_trap, self,
- ret, am_true, ret);
- /* bif_trap returns */
- } break;
- default:
+ if (is_external_port(target)) {
+ if (erts_this_dist_entry == external_port_dist_entry(target))
+ goto noproc;
goto badarg;
}
- }
- else if (proc != self) {
- erts_add_monitor(&ERTS_P_MONITORS(self), MON_ORIGIN, ret,
- proc->common.id, target_name);
- erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret,
- self->common.id, target_name);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_LINK);
- }
- if (p_locks) {
- erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
- }
- BIF_RET(ret);
-badarg:
- if (p_locks) {
- erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
- }
- BIF_ERROR(self, BADARG);
-}
-
-static BIF_RETTYPE
-remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
- DistEntry *dep, Eterm target, int byname)
-{
- ErtsDSigData dsd;
- BIF_RETTYPE ret;
- int code;
-
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0);
- switch (code) {
- case ERTS_DSIG_PREP_NOT_ALIVE:
- /* Let the dmonitor_p trap handle it */
- case ERTS_DSIG_PREP_NOT_CONNECTED:
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2);
- break;
- case ERTS_DSIG_PREP_CONNECTED:
- if (!(dep->flags & DFLAG_DIST_MONITOR)
- || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) {
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- ERTS_BIF_PREP_ERROR(ret, p, BADARG);
- }
- else {
- Eterm p_trgt, p_name, d_name, mon_ref;
+ if (is_tuple(target)) {
+ Eterm *tpl = tuple_val(target);
+ if (arityval(tpl[0]) != 2)
+ goto badarg;
+ if (is_not_atom(tpl[1]) || is_not_atom(tpl[2]))
+ goto badarg;
+ if (tpl[2] == erts_this_dist_entry->sysname) {
+ target = tpl[1];
+ goto local_named_port;
+ }
+ }
- mon_ref = erts_make_ref(p);
+ /* badarg... */
+ }
+ else if (BIF_ARG_1 == am_time_offset) {
- if (byname) {
- p_trgt = dep->sysname;
- p_name = target;
- d_name = target;
- }
- else {
- p_trgt = target;
- p_name = NIL;
- d_name = NIL;
- }
+ if (target != am_clock_service)
+ goto badarg;
+ ref = erts_make_ref(BIF_P);
+ mdp = erts_monitor_create(ERTS_MON_TYPE_TIME_OFFSET,
+ ref, BIF_P->common.id,
+ am_clock_service, NIL);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
- erts_smp_de_links_lock(dep);
+ erts_monitor_time_offset(&mdp->target);
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt,
- p_name);
- erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id,
- d_name);
+ BIF_RET(ref);
+ }
- erts_smp_de_links_unlock(dep);
- erts_smp_de_runlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+badarg:
- code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref);
- if (code == ERTS_DSIG_SEND_YIELD)
- ERTS_BIF_PREP_YIELD_RETURN(ret, p, mon_ref);
- else
- ERTS_BIF_PREP_RET(ret, mon_ref);
- }
- break;
- default:
- ASSERT(! "Invalid dsig prepare result");
- ERTS_BIF_PREP_ERROR(ret, p, EXC_INTERNAL_ERROR);
- break;
- }
+ BIF_ERROR(BIF_P, BADARG);
- BIF_RET(ret);
-}
-
-BIF_RETTYPE monitor_2(BIF_ALIST_2)
-{
- Eterm target = BIF_ARG_2;
- BIF_RETTYPE ret;
- DistEntry *dep = NULL;
- int deref_de = 0;
+noproc: {
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
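+        /* No such process or port: still hand out a fresh
+         * reference, but queue the 'DOWN' message with reason
+         * 'noproc' to ourselves right away. */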
- /* Only process monitors are implemented */
- switch (BIF_ARG_1) {
- case am_time_offset: {
- Eterm ref;
- if (BIF_ARG_2 != am_clock_service) {
- goto badarg;
- }
- ref = erts_make_ref(BIF_P);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET,
- ref, am_clock_service, NIL);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_monitor_time_offset(BIF_P->common.id, ref);
- BIF_RET(ref);
- }
- case am_process:
- case am_port:
- break;
- default:
- goto badarg;
- }
+ ref = erts_make_ref(BIF_P);
+ erts_queue_monitor_message(BIF_P,
+ &locks,
+ ref,
+ BIF_ARG_1,
+ target,
+ am_noproc);
+ if (locks != ERTS_PROC_LOCK_MAIN)
+ erts_proc_unlock(BIF_P, locks & ~ERTS_PROC_LOCK_MAIN);
- if (is_internal_pid(target) && BIF_ARG_1 == am_process) {
-local_pid:
- ret = local_pid_monitor(BIF_P, target, erts_make_ref(BIF_P), 0);
- } else if (is_external_pid(target) && BIF_ARG_1 == am_process) {
- dep = external_pid_dist_entry(target);
- if (dep == erts_this_dist_entry)
- goto local_pid;
- ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, target, 0);
- } else if (is_internal_port(target) && BIF_ARG_1 == am_port) {
-local_port:
- ret = local_port_monitor(BIF_P, target);
- } else if (is_external_port(target) && BIF_ARG_1 == am_port) {
- dep = external_port_dist_entry(target);
- if (dep == erts_this_dist_entry) {
- goto local_port;
- }
- goto badarg; /* No want remote port */
- } else if (is_atom(target)) {
- ret = local_name_monitor(BIF_P, BIF_ARG_1, target);
- } else if (is_tuple(target)) {
- Eterm *tp = tuple_val(target);
- Eterm remote_node;
- Eterm name;
- if (arityval(*tp) != 2) {
- goto badarg;
- }
- remote_node = tp[2];
- name = tp[1];
- if (!is_atom(remote_node) || !is_atom(name)) {
- goto badarg;
- }
- if (!erts_is_alive && remote_node != am_Noname) {
- goto badarg; /* Remote monitor from (this) undistributed node */
- }
- dep = erts_sysname_to_connected_dist_entry(remote_node);
- if (dep == erts_this_dist_entry) {
- deref_de = 1;
- ret = local_name_monitor(BIF_P, BIF_ARG_1, name);
- } else {
- if (dep)
- deref_de = 1;
- ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1);
- }
- } else {
-badarg:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ BIF_RET(ref);
}
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
- }
-
- return ret;
}
/**********************************************************************/
@@ -1012,7 +751,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.max_heap_size = H_MAX_SIZE;
so.max_heap_flags = H_MAX_FLAGS;
so.priority = PRIORITY_NORMAL;
- so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
so.scheduler = 0;
/*
@@ -1131,181 +870,102 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
/* remove a link from a process */
BIF_RETTYPE unlink_1(BIF_ALIST_1)
{
- Process *rp;
- DistEntry *dep;
- ErtsLink *l = NULL, *rl = NULL;
- ErtsProcLocks cp_locks = ERTS_PROC_LOCK_MAIN;
-
- /*
- * SMP specific note concerning incoming exit signals:
- * We have to have at least the status lock during removal of
- * the link half on current process, and check for and handle
- * a present pending exit while the status lock is held. This
- * in order to ensure that we wont be exited by a link after
- * it has been removed.
- *
- * (We also have to have the link lock, of course, in order to
- * be allowed to remove the link...)
- */
-
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, cp_locks, BIF_P, am_unlink, BIF_ARG_1);
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_P, am_unlink, BIF_ARG_1);
}
- if (is_internal_port(BIF_ARG_1)) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P))
- goto handle_pending_exit;
-#endif
-
- l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
+ if (is_internal_pid(BIF_ARG_1)) {
+ ErtsLink *lnk = erts_link_tree_lookup(ERTS_P_LINKS(BIF_P), BIF_ARG_1);
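+        /* Remove our link half from the tree and hand it over
+         * to the unlink signal sent to the target. */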
+ if (lnk) {
+ erts_link_tree_delete(&ERTS_P_LINKS(BIF_P), lnk);
+ erts_proc_sig_send_unlink(BIF_P, lnk);
+ }
+ BIF_RET(am_true);
+ }
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ if (is_internal_port(BIF_ARG_1)) {
+ ErtsLink *lnk = erts_link_tree_lookup(ERTS_P_LINKS(BIF_P), BIF_ARG_1);
- if (l) {
+ if (lnk) {
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+ ErtsPortOpResult res = ERTS_PORT_OP_DROPPED;
Port *prt;
- erts_destroy_link(l);
+ erts_link_tree_delete(&ERTS_P_LINKS(BIF_P), lnk);
/* Send unlink signal */
prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_DEAD);
if (prt) {
- ErtsPortOpResult res;
- Eterm ref;
- Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
#ifdef DEBUG
ref = NIL;
#endif
- res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
+ res = erts_port_unlink(BIF_P, prt, lnk, refp);
- if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
- }
}
+
+ if (res == ERTS_PORT_OP_DROPPED)
+ erts_link_release(lnk);
+ else if (refp && res == ERTS_PORT_OP_SCHEDULED) {
+ ASSERT(is_internal_ordinary_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
}
BIF_RET(am_true);
}
- else if (is_external_port(BIF_ARG_1)
- && external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
- BIF_RET(am_true);
- }
-
- if (is_not_pid(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
if (is_external_pid(BIF_ARG_1)) {
- ErtsDistLinkData dld;
+ ErtsLink *lnk, *dlnk;
+ ErtsLinkData *ldp;
+ DistEntry *dep;
int code;
ErtsDSigData dsd;
- /* Blind removal, we might have trapped or anything, this leaves
- us in a state where monitors might be inconsistent, but the dist
- code should take care of it. */
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P))
- goto handle_pending_exit;
-#endif
- l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
-
- erts_smp_proc_unlock(BIF_P,
- ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-
- if (l)
- erts_destroy_link(l);
dep = external_pid_dist_entry(BIF_ARG_1);
- if (dep == erts_this_dist_entry) {
+ if (dep == erts_this_dist_entry)
BIF_RET(am_true);
- }
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
+ lnk = erts_link_tree_lookup(ERTS_P_LINKS(BIF_P), BIF_ARG_1);
+ if (!lnk)
+ BIF_RET(am_true);
+
+ erts_link_tree_delete(&ERTS_P_LINKS(BIF_P), lnk);
+ dlnk = erts_link_to_other(lnk, &ldp);
+
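+        /* If the remote half was still in the dist entry's link
+         * list we now own both halves; otherwise the dist code
+         * got there first and we only release our own half. */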
+ if (erts_link_dist_delete(dlnk))
+ erts_link_release_both(ldp);
+ else
+ erts_link_release(lnk);
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
-#if 1
BIF_RET(am_true);
-#else
- /*
- * This is how we used to do it, but the link is obviously not
- * active, so I see no point in setting up a connection.
- * /Rickard
- */
- BIF_TRAP1(dunlink_trap, BIF_P, BIF_ARG_1);
-#endif
-
+ case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, BIF_P->common.id, BIF_ARG_1, dep);
code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1);
- erts_destroy_dist_link(&dld);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- BIF_RET(am_true);
-
+ break;
default:
ASSERT(! "Invalid dsig prepare result");
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
}
- }
-
- /* Internal pid... */
-
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-
- cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS;
- /* get process struct */
- rp = erts_pid2proc_opt(BIF_P, cp_locks,
- BIF_ARG_1, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
-
-#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
- if (rp && rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- goto handle_pending_exit;
- }
-#endif
-
- /* unlink and ignore errors */
- l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
- if (l != NULL)
- erts_destroy_link(l);
-
- if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ BIF_RET(am_true);
}
- else {
- rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id);
- if (rl != NULL)
- erts_destroy_link(rl);
-
- if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
- cp_locks &= ~ERTS_PROC_LOCK_STATUS;
- trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK),
- rp, am_getting_unlinked, BIF_P->common.id);
- }
- if (rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (is_external_port(BIF_ARG_1)) {
+ if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_true);
+ /* Links to Remote ports not supported... */
}
-
- erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(am_true);
-#ifdef ERTS_SMP
- handle_pending_exit:
- erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCK_STATUS));
- ASSERT(ERTS_PROC_IS_EXITING(BIF_P));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- ERTS_BIF_EXITED(BIF_P);
-#endif
+ BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE hibernate_3(BIF_ALIST_3)
@@ -1317,7 +977,11 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3)
*/
Eterm reg[3];
- if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) {
+ reg[0] = BIF_ARG_1;
+ reg[1] = BIF_ARG_2;
+ reg[2] = BIF_ARG_3;
+
+ if (erts_hibernate(BIF_P, reg)) {
/*
     * If hibernate succeeded, TRAP. The process will wait in a
* hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE);
@@ -1486,6 +1150,13 @@ BIF_RETTYPE raise_3(BIF_ALIST_3)
/* Create stacktrace and store */
if (erts_backtrace_depth < depth) {
depth = erts_backtrace_depth;
+ if (depth == 0) {
+ /*
+ * For consistency with stacktraces generated
+ * automatically, always include one element.
+ */
+ depth = 1;
+ }
must_copy = 1;
}
if (must_copy) {
@@ -1546,22 +1217,69 @@ BIF_RETTYPE raise_3(BIF_ALIST_3)
return am_badarg;
}
+static BIF_RETTYPE
+erts_internal_await_exit_trap(BIF_ALIST_0)
+{
+ /*
+     * We have sent ourselves an exit signal that will
+     * terminate us. Handle all incoming signals until
+     * terminated, in order to preserve signal order.
+     * Yield if necessary.
+ */
+ erts_aint32_t state;
+ int reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ (void) erts_proc_sig_handle_incoming(BIF_P, &state, &reds,
+ reds, !0);
+ BUMP_REDS(BIF_P, reds);
+ if (state & ERTS_PSFLG_EXITING)
+ ERTS_BIF_EXITED(BIF_P);
+
+ ERTS_BIF_YIELD0(&await_exit_trap, BIF_P);
+}
+
/**********************************************************************/
-/* send an exit message to another process (if trapping exits) or
- exit the other process */
+/* send an exit signal to another process */
-BIF_RETTYPE exit_2(BIF_ALIST_2)
+static BIF_RETTYPE send_exit_signal_bif(Process *c_p, Eterm id, Eterm reason, int exit2)
{
- Process *rp;
+ BIF_RETTYPE ret_val;
- /*
- * If the first argument is not a pid, or a local port it is an error.
- */
+ /*
+     * If 'id' is neither a process id nor a local port id,
+     * it is a 'badarg' error.
+ */
- if (is_internal_port(BIF_ARG_1)) {
+ if (is_internal_pid(id)) {
+ /*
+ * Preserve the very old and *very strange* behaviour
+ * of erlang:exit/2...
+ *
+         * - terminate ourselves even though the exit
+         *   reason is normal (unless we trap exits)
+         * - terminate ourselves before exit/2 returns
+ */
+ int exit2_suicide = (exit2
+ && c_p->common.id == id
+ && (reason == am_kill
+ || !(c_p->flags & F_TRAP_EXIT)));
+ erts_proc_sig_send_exit(c_p, c_p->common.id, id,
+ reason, NIL, exit2_suicide);
+ if (!exit2_suicide)
+ ERTS_BIF_PREP_RET(ret_val, am_true);
+ else {
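+        /* We sent the exit signal to ourselves; fetch pending
+         * signals and trap into the await-exit loop so that
+         * signal is handled before exit/2 returns. */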
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ ERTS_BIF_PREP_TRAP0(ret_val, &await_exit_trap, c_p);
+ }
+ }
+ else if (is_internal_port(id)) {
Eterm ref, *refp;
Uint32 invalid_flags;
Port *prt;
+ ErtsPortOpResult res = ERTS_PORT_OP_DONE;
+#ifdef DEBUG
+ ref = NIL;
+#endif
if (erts_port_synchronous_ops) {
refp = &ref;
@@ -1572,120 +1290,88 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
invalid_flags = ERTS_PORT_SFLGS_INVALID_LOOKUP;
}
- prt = erts_port_lookup(BIF_ARG_1, invalid_flags);
-
- if (prt) {
- ErtsPortOpResult res;
-
-#ifdef DEBUG
- ref = NIL;
-#endif
-
- res = erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, refp);
-
- ERTS_BIF_CHK_EXITED(BIF_P);
-
- if (refp && res == ERTS_PORT_OP_SCHEDULED) {
- ASSERT(is_internal_ref(ref));
- BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
- }
-
- }
-
- BIF_RET(am_true);
+ prt = erts_port_lookup(id, invalid_flags);
+ if (prt)
+ res = erts_port_exit(c_p, 0, prt, c_p->common.id, reason, refp);
+
+ if (!refp || res != ERTS_PORT_OP_SCHEDULED)
+ ERTS_BIF_PREP_RET(ret_val, am_true);
+ else {
+ ASSERT(is_internal_ordinary_ref(ref));
+ ERTS_BIF_PREP_TRAP3(ret_val, await_port_send_result_trap,
+ c_p, ref, am_true, am_true);
+ }
}
- else if(is_external_port(BIF_ARG_1)
- && external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
- BIF_RET(am_true);
-
- /*
- * If it is a remote pid, send a signal to the remote node.
- */
-
- if (is_external_pid(BIF_ARG_1)) {
- int code;
- ErtsDSigData dsd;
- DistEntry *dep;
-
- dep = external_pid_dist_entry(BIF_ARG_1);
- if(dep == erts_this_dist_entry)
- BIF_RET(am_true);
-
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
- switch (code) {
- case ERTS_DSIG_PREP_NOT_ALIVE:
- case ERTS_DSIG_PREP_NOT_CONNECTED:
- BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
- case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_exit2(&dsd, BIF_P->common.id, BIF_ARG_1, BIF_ARG_2);
- if (code == ERTS_DSIG_SEND_YIELD)
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- BIF_RET(am_true);
- default:
- ASSERT(! "Invalid dsig prepare result");
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- }
+ else if (is_external_pid(id)) {
+ DistEntry *dep = external_pid_dist_entry(id);
+ if (dep == erts_this_dist_entry)
+ ERTS_BIF_PREP_RET(ret_val, am_true); /* Old incarnation of this node... */
+ else {
+ int code;
+ ErtsDSigData dsd;
+
+ code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 1);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ ERTS_BIF_PREP_RET(ret_val, am_true);
+ break;
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED:
+ code = erts_dsig_send_exit2(&dsd, c_p->common.id, id, reason);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_PREP_YIELD_RETURN(ret_val, c_p, am_true);
+ else
+ ERTS_BIF_PREP_RET(ret_val, am_true);
+ break;
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ ERTS_BIF_PREP_ERROR(ret_val, c_p, EXC_INTERNAL_ERROR);
+ break;
+ }
+ }
}
- else if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ else if (is_external_port(id)) {
+ DistEntry *dep = external_port_dist_entry(id);
+ if(dep == erts_this_dist_entry)
+ ERTS_BIF_PREP_RET(ret_val, am_true); /* Old incarnation of this node... */
+ else
+ ERTS_BIF_PREP_ERROR(ret_val, c_p, BADARG);
}
else {
- /*
- * The pid is internal. Verify that it refers to an existing process.
- */
- ErtsProcLocks rp_locks;
-
- if (BIF_ARG_1 == BIF_P->common.id) {
- rp_locks = ERTS_PROC_LOCKS_ALL;
- rp = BIF_P;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- else {
- rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, rp_locks);
- if (!rp) {
- BIF_RET(am_true);
- }
- }
+ /* Not an id of a process or a port... */
- /*
- * Send an exit signal.
- */
- erts_send_exit_signal(BIF_P,
- BIF_P->common.id,
- rp,
- &rp_locks,
- BIF_ARG_2,
- NIL,
- NULL,
- BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0);
-#ifdef ERTS_SMP
- if (rp == BIF_P)
- rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
-#endif
- /*
- * We may have exited ourselves and may have to take action.
- */
- ERTS_BIF_CHK_EXITED(BIF_P);
- BIF_RET(am_true);
+ ERTS_BIF_PREP_ERROR(ret_val, c_p, BADARG);
}
+
+ return ret_val;
+}
+
+BIF_RETTYPE exit_2(BIF_ALIST_2)
+{
+ return send_exit_signal_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !0);
+}
+
+BIF_RETTYPE exit_signal_2(BIF_ALIST_2)
+{
+ return send_exit_signal_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, 0);
}
+
/**********************************************************************/
/* this sets some process info- trapping exits or the error handler */
/* Handle flags common to both process_flag_2 and process_flag_3. */
-static BIF_RETTYPE process_flag_aux(Process *BIF_P,
- Process *rp,
- Eterm flag,
- Eterm val)
+static Eterm process_flag_aux(Process *c_p, int *redsp, Eterm flag, Eterm val)
{
Eterm old_value = NIL; /* shut up warning about use before set */
Sint i;
+
+ if (redsp)
+ *redsp = 1;
+
if (flag == am_save_calls) {
struct saved_calls *scb;
if (!is_small(val))
@@ -1705,30 +1391,89 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
}
#ifdef HIPE
- if (rp->flags & F_HIPE_MODE) {
- ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
- scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+ if (c_p->flags & F_HIPE_MODE) {
+ ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p));
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(c_p, scb);
}
else
#endif
{
#ifdef HIPE
- ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+ ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(c_p));
#endif
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
- if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
- /* Adjust fcalls to match save calls setting... */
- if (i == 0)
- BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
- else
- BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
-
- /*
- * Make sure we reschedule immediately so the
- * change take effect at once.
- */
- ERTS_VBUMP_ALL_REDS(BIF_P);
- }
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(c_p, scb);
+
+ if (((scb && i == 0) || (!scb && i != 0))) {
+
+ /*
+                 * Make sure we reschedule immediately so the
+                 * change takes effect at once.
+ */
+ if (!redsp) {
+ /* Executed via BIF call.. */
+ via_bif:
+
+ /* Adjust fcalls to match save calls setting... */
+ if (i == 0)
+ c_p->fcalls += CONTEXT_REDS; /* disabled it */
+ else
+ c_p->fcalls -= CONTEXT_REDS; /* enabled it */
+
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+ else {
+ erts_aint32_t state;
+ /*
+ * Executed via signal handler. Try to figure
+ * out in what context we are executing...
+ */
+
+ state = erts_atomic32_read_nob(&c_p->state);
+ if (state & (ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING)) {
+ /*
+ * We are either processing signals before
+ * being executed or executing dirty. That
+ * is, no need to adjust anything...
+ */
+ *redsp = 1;
+ }
+ else {
+ ErtsSchedulerData *esdp;
+ ASSERT(state & ERTS_PSFLG_RUNNING);
+
+ /*
+ * F_DELAY_GC is currently only set when
+ * we handle signals in state running via
+ * receive helper...
+ */
+
+ if (!(c_p->flags & F_DELAY_GC)) {
+ *redsp = 1;
+ goto via_bif;
+ }
+
+ /*
+ * Executing via receive helper...
+ *
+ * We utilize the virtual reds counter
+ * in order to get correct calculation
+ * of reductions consumed when scheduling
+ * out the process...
+ */
+
+ esdp = erts_get_scheduler_data();
+
+ if (i == 0)
+ esdp->virtual_reds += CONTEXT_REDS; /* disabled it */
+ else
+ esdp->virtual_reds -= CONTEXT_REDS; /* enabled it */
+
+ *redsp = -1;
+ }
+ }
+ }
}
if (!scb)
@@ -1738,11 +1483,12 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
}
- BIF_RET(old_value);
+ ASSERT(is_immed(old_value));
+ return old_value;
}
error:
- BIF_ERROR(BIF_P, BADARG);
+ return am_badarg;
}
BIF_RETTYPE process_flag_2(BIF_ALIST_2)
@@ -1762,44 +1508,18 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_trap_exit) {
- erts_aint32_t state;
- Uint trap_exit;
- if (BIF_ARG_2 == am_true) {
- trap_exit = 1;
- } else if (BIF_ARG_2 == am_false) {
- trap_exit = 0;
- } else {
- goto error;
- }
- /*
- * NOTE: It is important that we check for pending exit signals
- * and handle them before returning if trap_exit is set to
- * true. For more info, see implementation of
- * erts_send_exit_signal().
- */
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND);
- if (trap_exit)
- state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
- ERTS_PSFLG_TRAP_EXIT);
+ old_value = (BIF_P->flags & F_TRAP_EXIT) ? am_true : am_false;
+ if (BIF_ARG_2 == am_true)
+ BIF_P->flags |= F_TRAP_EXIT;
+ else if (BIF_ARG_2 == am_false)
+ BIF_P->flags &= ~F_TRAP_EXIT;
else
- state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
- ~ERTS_PSFLG_TRAP_EXIT);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND);
-
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT) {
- erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
- ERTS_BIF_EXITED(BIF_P);
- }
-#endif
-
- old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false;
+ goto error;
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_scheduler) {
ErtsRunQueue *old, *new, *curr;
Sint sched;
- erts_aint32_t state;
if (!is_small(BIF_ARG_2))
goto error;
@@ -1808,25 +1528,23 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
goto error;
if (sched == 0) {
+ old = erts_bind_runq_proc(BIF_P, 0);
new = NULL;
- state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
- ~ERTS_PSFLG_BOUND);
}
else {
+ int bound = !0;
new = erts_schedid2runq(sched);
-#ifdef ERTS_SMP
- erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new);
-#endif
- state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
- ERTS_PSFLG_BOUND);
+ old = erts_set_runq_proc(BIF_P, new, &bound);
+ if (!bound)
+ old = NULL;
}
+ old_value = old ? make_small(old->ix+1) : make_small(0);
+
curr = erts_proc_sched_data(BIF_P)->run_queue;
- old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
ASSERT(!old || old == curr);
- old_value = old ? make_small(old->ix+1) : make_small(0);
if (new && new != curr)
ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler);
else
@@ -1898,7 +1616,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
} else {
goto error;
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE
? am_true
: am_false);
@@ -1907,7 +1625,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
} else {
ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
/* make sure to bump all reds so that we get
       rescheduled immediately and the setting takes effect */
BIF_RET2(old_value, CONTEXT_REDS);
@@ -1939,33 +1657,73 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
/* Fall through and try process_flag_aux() ... */
}
- BIF_RET(process_flag_aux(BIF_P, BIF_P, BIF_ARG_1, BIF_ARG_2));
+ old_value = process_flag_aux(BIF_P, NULL, BIF_ARG_1, BIF_ARG_2);
+ if (old_value != am_badarg)
+ BIF_RET(old_value);
error:
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_flag_3(BIF_ALIST_3)
+typedef struct {
+ Eterm flag;
+ Eterm value;
+ ErlOffHeap oh;
+ Eterm heap[1];
+} ErtsProcessFlag3Args;
+
+static Eterm
+exec_process_flag_3(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
- Eterm res;
-
-#ifdef ERTS_SMP
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-#else
- rp = erts_proc_lookup(BIF_ARG_1);
-#endif
+ ErtsProcessFlag3Args *pf3a = arg;
+ Eterm res;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ res = am_badarg;
+ else
+ res = process_flag_aux(c_p, redsp, pf3a->flag, pf3a->value);
+ erts_cleanup_offheap(&pf3a->oh);
+ erts_free(ERTS_ALC_T_PF3_ARGS, arg);
+ return res;
+}
+
+
+BIF_RETTYPE erts_internal_process_flag_3(BIF_ALIST_3)
+{
+ Eterm res, *hp;
+ ErlOffHeap *ohp;
+ ErtsProcessFlag3Args *pf3a;
+ Uint flag_sz, value_sz;
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_flag_aux(BIF_P, NULL, BIF_ARG_2, BIF_ARG_3);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
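+    /* Copy the flag and value into an off-heap argument block;
+     * the request runs in the target process via an rpc signal,
+     * so it must not reference our heap. */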
+ flag_sz = is_immed(BIF_ARG_2) ? 0 : size_object(BIF_ARG_2);
+ value_sz = is_immed(BIF_ARG_3) ? 0 : size_object(BIF_ARG_3);
- if (!rp)
- BIF_ERROR(BIF_P, BADARG);
+ pf3a = erts_alloc(ERTS_ALC_T_PF3_ARGS,
+ sizeof(ErtsProcessFlag3Args)
+ + sizeof(Eterm)*(flag_sz+value_sz-1));
- res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
+ ohp = &pf3a->oh;
+ ERTS_INIT_OFF_HEAP(&pf3a->oh);
- if (rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ hp = &pf3a->heap[0];
+
+ pf3a->flag = copy_struct(BIF_ARG_2, flag_sz, &hp, ohp);
+ pf3a->value = copy_struct(BIF_ARG_3, value_sz, &hp, ohp);
+
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ exec_process_flag_3,
+ (void *) pf3a);
+
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
return res;
}
@@ -2037,7 +1795,7 @@ ebif_bang_2(BIF_ALIST_2)
* Send a message to Process, Port or Registered Process.
* Returns non-negative reduction bump or negative result code.
*/
-#define SEND_TRAP (-1)
+#define SEND_NOCONNECT (-1)
#define SEND_YIELD (-2)
#define SEND_YIELD_RETURN (-3)
#define SEND_BADARG (-4)
@@ -2047,27 +1805,28 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_YIELD_CONTINUE (-8)
-Sint do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext*);
-
static Sint remote_send(Process *p, DistEntry *dep,
Eterm to, Eterm full_to, Eterm msg,
ErtsSendContext* ctx)
{
Sint res;
int code;
-
ASSERT(is_atom(to) || is_external_pid(to));
- code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend);
+ ctx->dep = dep;
+ code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK,
+ !ctx->suspend, ctx->connect);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
- res = SEND_TRAP;
+ res = SEND_NOCONNECT;
break;
case ERTS_DSIG_PREP_WOULD_SUSPEND:
ASSERT(!ctx->suspend);
res = SEND_YIELD;
break;
+ case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED: {
if (is_atom(to))
@@ -2102,8 +1861,8 @@ static Sint remote_send(Process *p, DistEntry *dep,
return res;
}
-Sint
-do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
+static Sint
+do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
{
Eterm portid;
Port *pt;
@@ -2203,9 +1962,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
}
switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
- case ERTS_PORT_OP_CALLER_EXIT:
- /* We are exiting... */
- return SEND_USER_ERROR;
case ERTS_PORT_OP_BUSY:
/* Nothing has been sent */
if (ctx->suspend)
@@ -2221,7 +1977,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
/* Fall through */
case ERTS_PORT_OP_SCHEDULED:
if (is_not_nil(*refp)) {
- ASSERT(is_internal_ref(*refp));
+ ASSERT(is_internal_ordinary_ref(*refp));
ret_val = SEND_AWAIT_RESULT;
}
break;
@@ -2244,6 +2000,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
}
return ret_val;
} else if (is_tuple(to)) { /* Remote send */
+ int deref_dep = 0;
int ret;
tp = tuple_val(to);
if (*tp != make_arityval(2))
@@ -2251,15 +2008,13 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
if (is_not_atom(tp[1]) || is_not_atom(tp[2]))
return SEND_BADARG;
- /* sysname_to_connected_dist_entry will return NULL if there
- is no dist_entry or the dist_entry has no port,
+	/* erts_find_dist_entry will return NULL if there is no dist_entry;
+	   in that case one is created below before sending. */
- dep = erts_sysname_to_connected_dist_entry(tp[2]);
+ dep = erts_find_dist_entry(tp[2]);
if (dep == erts_this_dist_entry) {
Eterm id;
- erts_deref_dist_entry(dep);
if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
@@ -2280,15 +2035,20 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
}
return 0;
}
+ if (dep == NULL) {
+ dep = erts_find_or_insert_dist_entry(tp[2]);
+ ASSERT(dep != erts_this_dist_entry);
+ deref_dep = 1;
+ }
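+        /* A dist entry created here is released again after the
+         * send; a yielding operation instead takes its own
+         * reference via the send context below. */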
+ ctx->dsd.node = tp[2];
ret = remote_send(p, dep, tp[1], to, msg, ctx);
- if (ret != SEND_YIELD_CONTINUE) {
- if (dep) {
- erts_deref_dist_entry(dep);
- }
- } else {
- ctx->dep_to_deref = dep;
+ if (ret == SEND_YIELD_CONTINUE) {
+ erts_ref_dist_entry(ctx->dep);
+ ctx->deref_dep = 1;
}
+ if (deref_dep)
+ erts_deref_dist_entry(dep);
return ret;
} else {
if (IS_TRACED_FL(p, F_TRACE_SEND))
@@ -2300,22 +2060,15 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
send_message: {
ErtsProcLocks rp_locks = 0;
- Sint res;
-#ifdef ERTS_SMP
if (p == rp)
rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
/* send to local process */
- res = erts_send_message(p, rp, &rp_locks, msg, 0);
- if (erts_use_sender_punish)
- res *= 4;
- else
- res = 0;
- erts_smp_proc_unlock(rp,
+ erts_send_message(p, rp, &rp_locks, msg);
+ erts_proc_unlock(rp,
p == rp
? (rp_locks & ~ERTS_PROC_LOCK_MAIN)
: rp_locks);
- return res;
+ return 0;
}
}
@@ -2330,7 +2083,6 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
Eterm msg = BIF_ARG_2;
Eterm opts = BIF_ARG_3;
- int connect = !0;
Eterm l = opts;
Sint result;
@@ -2341,14 +2093,15 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
ctx->suspend = !0;
- ctx->dep_to_deref = NULL;
+ ctx->connect = !0;
+ ctx->deref_dep = 0;
ctx->return_term = am_ok;
ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
while (is_list(l)) {
if (CAR(list_val(l)) == am_noconnect) {
- connect = 0;
+ ctx->connect = 0;
} else if (CAR(list_val(l)) == am_nosuspend) {
ctx->suspend = 0;
} else {
@@ -2370,8 +2123,8 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
result = do_send(p, to, msg, &ref, ctx);
ERTS_MSACC_POP_STATE_M_X();
- if (result > 0) {
- ERTS_VBUMP_REDS(p, result);
+ if (result >= 0) {
+ ERTS_VBUMP_REDS(p, 4);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
ERTS_BIF_PREP_RET(retval, am_ok);
@@ -2379,15 +2132,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
}
switch (result) {
- case 0:
- /* May need to yield even though we do not bump reds here... */
- if (ERTS_IS_PROC_OUT_OF_REDS(p))
- goto yield_return;
- ERTS_BIF_PREP_RET(retval, am_ok);
- break;
- case SEND_TRAP:
- if (connect) {
- ERTS_BIF_PREP_TRAP3(retval, dsend3_trap, p, to, msg, opts);
+ case SEND_NOCONNECT:
+ if (ctx->connect) {
+ ERTS_BIF_PREP_RET(retval, am_ok);
} else {
ERTS_BIF_PREP_RET(retval, am_noconnect);
}
@@ -2409,7 +2156,7 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, am_ok);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval, await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
break;
case SEND_BADARG:
@@ -2446,7 +2193,7 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
static BIF_RETTYPE dsend_continue_trap_1(BIF_ALIST_1)
{
- Binary* bin = ((ProcBin*) binary_val(BIF_ARG_1))->val;
+ Binary* bin = erts_magic_ref2bin(BIF_ARG_1);
ErtsSendContext* ctx = (ErtsSendContext*) ERTS_MAGIC_BIN_DATA(bin);
Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(BIF_P) * TERM_TO_BINARY_LOOP_FACTOR);
int result;
@@ -2491,7 +2238,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ref = NIL;
#endif
ctx->suspend = !0;
- ctx->dep_to_deref = NULL;
+ ctx->connect = !0;
+ ctx->deref_dep = 0;
ctx->return_term = msg;
ctx->dss.reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
@@ -2500,8 +2248,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ERTS_MSACC_POP_STATE_M_X();
- if (result > 0) {
- ERTS_VBUMP_REDS(p, result);
+ if (result >= 0) {
+ ERTS_VBUMP_REDS(p, 4);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
goto yield_return;
ERTS_BIF_PREP_RET(retval, msg);
@@ -2509,15 +2257,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
}
switch (result) {
- case 0:
- /* May need to yield even though we do not bump reds here... */
- if (ERTS_IS_PROC_OUT_OF_REDS(p))
- goto yield_return;
+ case SEND_NOCONNECT:
ERTS_BIF_PREP_RET(retval, msg);
break;
- case SEND_TRAP:
- ERTS_BIF_PREP_TRAP2(retval, dsend2_trap, p, to, msg);
- break;
case SEND_YIELD:
ERTS_BIF_PREP_YIELD2(retval, bif_export[BIF_send_2], p, to, msg);
break;
@@ -2526,7 +2268,7 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ERTS_BIF_PREP_YIELD_RETURN(retval, p, msg);
break;
case SEND_AWAIT_RESULT:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_TRAP3(retval,
await_port_send_result_trap, p, ref, msg, msg);
break;
@@ -3022,18 +2764,18 @@ BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
{
Eterm res;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
- Sint i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS);
-
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
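+    /* i == -2: the text exceeded the atom length limit (handled as
+     * system_limit below); any other negative value is a badarg. */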
if (i < 0) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
- i = erts_list_length(BIF_ARG_1);
- if (i > MAX_ATOM_CHARACTERS) {
+ if (i == -2) {
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
BIF_ERROR(BIF_P, BADARG);
}
- res = erts_atom_put((byte *) buf, i, ERTS_ATOM_ENC_LATIN1, 1);
+ res = erts_atom_put(buf, written, ERTS_ATOM_ENC_UTF8, 1);
ASSERT(is_atom(res));
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(res);
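
The rewritten list_to_atom/1 above relies on erts_unicode_list_to_buf() reporting two distinct failure codes: -2 when the atom text would exceed the character limit (mapped to system_limit), and any other negative result for a malformed list (mapped to badarg). A minimal standalone sketch of that convention follows; decode_list() and its limits are hypothetical stand-ins, not the real ERTS routine.

    /* Hypothetical stand-in for erts_unicode_list_to_buf():
     * 0 = ok, -1 = malformed input, -2 = over the length limit. */
    #include <stdio.h>
    #include <string.h>

    #define MAX_CHARS 255

    static int decode_list(const char *in, char *out, size_t *written)
    {
        size_t n = strlen(in);
        if (strchr(in, '?'))            /* stand-in for a bad element */
            return -1;
        if (n > MAX_CHARS)
            return -2;
        memcpy(out, in, n);
        *written = n;
        return 0;
    }

    int main(void)
    {
        char buf[MAX_CHARS];
        size_t written;
        switch (decode_list("hello", buf, &written)) {
        case 0:  printf("ok: %zu chars\n", written); break;
        case -1: puts("badarg");       break;  /* BIF_ERROR(p, BADARG) */
        case -2: puts("system_limit"); break;  /* BIF_ERROR(p, SYSTEM_LIMIT) */
        }
        return 0;
    }
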
@@ -3043,17 +2785,18 @@ BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
{
- Sint i;
- char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_CHARACTERS);
-
- if ((i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS)) < 0) {
+ byte *buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_SZ_LIMIT);
+ Sint written;
+ int i = erts_unicode_list_to_buf(BIF_ARG_1, buf, MAX_ATOM_CHARACTERS,
+ &written);
+ if (i < 0) {
error:
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
} else {
Eterm a;
- if (erts_atom_get(buf, i, &a, ERTS_ATOM_ENC_LATIN1)) {
+ if (erts_atom_get((char *) buf, written, &a, ERTS_ATOM_ENC_UTF8)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(a);
} else {
@@ -3109,7 +2852,7 @@ BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
* On error returns: {error,not_a_list}, or {error, no_integer}
*/
-BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_integer_1(BIF_ALIST_1)
{
Eterm res;
Eterm tail;
@@ -3175,7 +2918,7 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
static int do_float_to_charbuf(Process *p, Eterm efloat, Eterm list,
char *fbuf, int sizeof_fbuf) {
- const static int arity_two = make_arityval(2);
+ Eterm arity_two = make_arityval(2);
int decimals = SYS_DEFAULT_FLOAT_DECIMALS;
int compact = 0;
enum fmt_type_ {
@@ -3295,7 +3038,7 @@ BIF_RETTYPE float_to_binary_2(BIF_ALIST_2)
#define LOAD_E(xi,xim,xl,xlm) ((xi)=(xim), (xl)=(xlm))
#define STRING_TO_FLOAT_BUF_INC_SZ (128)
-BIF_RETTYPE string_to_float_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_float_1(BIF_ALIST_1)
{
Eterm orig = BIF_ARG_1;
Eterm list = orig;
@@ -3539,7 +3282,7 @@ BIF_RETTYPE binary_to_float_1(BIF_ALIST_1)
if (bit_offs)
erts_copy_bits(bytes, bit_offs, 1, buf, 0, 1, size*8);
else
- memcpy(buf, bytes, size);
+ sys_memcpy(buf, bytes, size);
buf[size] = '\0';
@@ -3865,11 +3608,25 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
+/*
+ * Pass the atom 'minor' for a relaxed generational GC run. This is
+ * only a recommendation; a major run may still be chosen by the VM.
+ * Pass the atom 'major' for the default behaviour: a major GC run
+ * (fullsweep).
+ */
+BIF_RETTYPE
+erts_internal_garbage_collect_1(BIF_ALIST_1)
{
- FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ switch (BIF_ARG_1) {
+ case am_minor: break;
+ case am_major: FLAGS(BIF_P) |= F_NEED_FULLSWEEP; break;
+ default: BIF_ERROR(BIF_P, BADARG);
+ }
erts_garbage_collect(BIF_P, 0, NULL, 0);
- BIF_RET(am_true);
+ if (ERTS_PROC_IS_EXITING(BIF_P)) {
+ /* The max heap size limit was reached. */
+ return THE_NON_VALUE;
+ }
+ return am_true;
}
/**********************************************************************/
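
erts_internal_garbage_collect_1 above follows a dispatch-then-check shape: map the request atom onto a process flag, run the collector, then test whether the max-heap-size limit terminated the process. A compilable sketch of the same shape, with hypothetical stand-ins (struct proc, run_gc(), the flag value) for the ERTS types; the badarg branch for non-atom input is elided:

    #include <stdbool.h>
    #include <stdio.h>

    #define F_NEED_FULLSWEEP 1u

    struct proc { unsigned flags; bool exiting; };

    enum gc_kind { GC_MINOR, GC_MAJOR };

    static void run_gc(struct proc *p)
    {
        (void)p;   /* a max-heap-size violation would set p->exiting */
    }

    /* Returns NULL to model THE_NON_VALUE when the process died. */
    static const char *garbage_collect(struct proc *p, enum gc_kind kind)
    {
        switch (kind) {
        case GC_MINOR: break;                           /* only a hint */
        case GC_MAJOR: p->flags |= F_NEED_FULLSWEEP; break;
        }
        run_gc(p);
        return p->exiting ? NULL : "true";
    }

    int main(void)
    {
        struct proc p = {0, false};
        printf("%s\n", garbage_collect(&p, GC_MAJOR));
        return 0;
    }
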
@@ -3938,15 +3695,18 @@ BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm string = BIF_ARG_1;
- Sint len = is_string(string);
- char *str;
+ Sint len = erts_unicode_list_to_buf_len(string);
+ Sint written;
+ byte *str;
+ int res;
- if (len <= 0) {
+ if (len < 0) {
BIF_ERROR(p, BADARG);
}
- str = (char *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
- if (intlist_to_buf(string, str, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ str = (byte *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
+ res = erts_unicode_list_to_buf(string, str, len, &written);
+ if (res != 0 || written != len)
+ erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error (%d)\n", __FILE__, __LINE__, res);
str[len] = '\0';
erts_fprintf(stderr, "%s", str);
erts_free(ERTS_ALC_T_TMP, (void *) str);
@@ -3962,9 +3722,6 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE+1];
-
/* stop the system with exit code and flags */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
@@ -4004,29 +3761,30 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined);
}
else {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_exit(pos_int_code, "");
}
}
else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_exit(ERTS_ABORT_EXIT, "");
}
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- Sint i;
+ else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+# define HALT_MSG_SIZE 200
+ static byte halt_msg[4*HALT_MSG_SIZE+1];
+ Sint written;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE)) == -1) {
+ if (erts_unicode_list_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE,
+ &written) == -1 ) {
goto error;
}
- if (i == -2) /* truncated string */
- i = HALT_MSG_SIZE;
- ASSERT(i >= 0 && i <= HALT_MSG_SIZE);
- halt_msg[i] = '\0';
+ ASSERT(written >= 0 && written < sizeof(halt_msg));
+ halt_msg[written] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
}
else
@@ -4099,6 +3857,7 @@ BIF_RETTYPE ref_to_list_1(BIF_ALIST_1)
{
if (is_not_ref(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
+ erts_magic_ref_save_bin(BIF_ARG_1);
BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
}
@@ -4211,7 +3970,6 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
goto bad;
if(dep == erts_this_dist_entry) {
- erts_deref_dist_entry(dep);
BIF_RET(make_internal_pid(make_pid_data(c, b)));
}
else {
@@ -4231,18 +3989,203 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
etp->data.ui[0] = make_pid_data(c, b);
MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
- erts_deref_dist_entry(dep);
BIF_RET(make_external_pid(etp));
}
bad:
- if (dep)
- erts_deref_dist_entry(dep);
if (buf)
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE list_to_port_1(BIF_ALIST_1)
+{
+ /*
+ * A valid port identifier has the format
+ * "#Port<N.P>" where N is the node number and P is
+ * the port id. Both N and P are of type Uint32.
+ */
+ Uint32 n, p;
+ char* cp;
+ int i;
+ DistEntry *dep = NULL;
+ char buf[6 /* #Port< */
+ + (2)*(10 + 1) /* N.P> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (sys_strncmp("#Port<", cp, 6) != 0)
+ goto bad;
+
+ cp += 6; /* sys_strlen("#Port<") */
+
+ if (sscanf(cp, "%u.%u>", (unsigned int*)&n, (unsigned int*)&p) < 2)
+ goto bad;
+
+ if (p > ERTS_MAX_PORT_NUMBER)
+ goto bad;
+
+ dep = erts_channel_no_to_dist_entry(n);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ BIF_RET(make_internal_port(p));
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ etp->header = make_external_port_header(1);
+ etp->next = MSO(BIF_P).first;
+ etp->node = enp;
+ etp->data.ui[0] = p;
+
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
+ BIF_RET(make_external_port(etp));
+ }
+
+ bad:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
+{
+ /*
+ * A valid reference is on the format
+ * "#Ref<N.X.Y.Z>" where N, X, Y, and Z are
+ * 32-bit integers (i.e., max 10 characters).
+ */
+ Eterm *hp;
+ Eterm res;
+ Uint32 refn[ERTS_MAX_REF_NUMBERS];
+ int n = 0;
+ Uint ints[1 + ERTS_MAX_REF_NUMBERS] = {0};
+ char* cp;
+ Sint i;
+ DistEntry *dep = NULL;
+ char buf[5 /* #Ref< */
+ + (1 + ERTS_MAX_REF_NUMBERS)*(10 + 1) /* N.X.Y.Z> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (*cp++ != '#') goto bad;
+ if (*cp++ != 'R') goto bad;
+ if (*cp++ != 'e') goto bad;
+ if (*cp++ != 'f') goto bad;
+ if (*cp++ != '<') goto bad;
+
+ for (i = 0; i < sizeof(ints)/sizeof(Uint); i++) {
+ if (*cp < '0' || *cp > '9') goto bad;
+
+ while (*cp >= '0' && *cp <= '9') {
+ ints[i] = 10*ints[i] + (*cp - '0');
+ cp++;
+ }
+
+ n++;
+ if (ints[i] > ~((Uint32) 0)) goto bad;
+ if (*cp == '>') break;
+ if (*cp++ != '.') goto bad;
+ }
+
+ if (*cp++ != '>') goto bad;
+ if (*cp != '\0') goto bad;
+
+ if (n < 2) goto bad;
+
+ for (n = 0; i > 0; i--)
+ refn[n++] = (Uint32) ints[i];
+
+ ASSERT(n <= ERTS_MAX_REF_NUMBERS);
+
+ dep = erts_channel_no_to_dist_entry(ints[0]);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ ErtsMagicBinary *mb;
+ Uint32 sid;
+ if (refn[0] > MAX_REFERENCE) goto bad;
+ if (n != ERTS_REF_NUMBERS) goto bad;
+ sid = erts_get_ref_numbers_thr_id(refn);
+ if (sid > erts_no_schedulers) goto bad;
+ mb = erts_magic_ref_lookup_bin(refn);
+ if (mb) {
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &BIF_P->off_heap,
+ (Binary *) mb);
+ }
+ else {
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ res = make_internal_ref(hp);
+ }
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+ Uint hsz;
+ int j;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ hsz = EXTERNAL_THING_HEAD_SIZE;
+#if defined(ARCH_64)
+ hsz += n/2 + 1;
+#else
+ hsz += n;
+#endif
+
+ etp = (ExternalThing *) HAlloc(BIF_P, hsz);
+ etp->header = make_external_ref_header(n/2);
+ etp->next = BIF_P->off_heap.first;
+ etp->node = enp;
+ i = 0;
+#if defined(ARCH_64)
+ etp->data.ui32[i] = n;
+#endif
+ for (j = 0; j < n; j++) {
+ etp->data.ui32[i] = refn[j];
+ i++;
+ }
+
+ BIF_P->off_heap.first = (struct erl_off_heap_header*) etp;
+ res = make_external_ref(etp);
+ }
+
+ BIF_RET(res);
+
+ bad:
+ BIF_ERROR(BIF_P, BADARG);
+}
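
list_to_ref_1 above scans the dot-separated decimal fields of "#Ref<N.X.Y.Z>" by hand, rejecting any field that does not start with a digit or that overflows 32 bits. A standalone sketch of just that field scanner (parse_fields() is hypothetical; node lookup and heap construction are omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Scan up to `max` dot-separated decimal fields ending in '>'.
     * Returns 0 on success with *n fields stored, -1 on bad input. */
    static int parse_fields(const char *cp, uint64_t *out, int max, int *n)
    {
        int i;
        for (i = 0; i < max; i++) {
            if (*cp < '0' || *cp > '9') return -1;
            out[i] = 0;
            while (*cp >= '0' && *cp <= '9')
                out[i] = 10 * out[i] + (uint64_t)(*cp++ - '0');
            if (out[i] > UINT32_MAX) return -1; /* must fit in 32 bits */
            (*n)++;
            if (*cp == '>') break;
            if (*cp++ != '.') return -1;
        }
        return (*cp == '>') ? 0 : -1;
    }

    int main(void)
    {
        uint64_t f[4];
        int n = 0;
        if (parse_fields("0.1.2.3>", f, 4, &n) == 0)
            printf("parsed %d fields\n", n);
        return 0;
    }
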
+
+
/**********************************************************************/
BIF_RETTYPE group_leader_0(BIF_ALIST_0)
@@ -4251,14 +4194,89 @@ BIF_RETTYPE group_leader_0(BIF_ALIST_0)
}
/**********************************************************************/
-/* arg1 == leader, arg2 == new member */
+/* set group leader */
-BIF_RETTYPE group_leader_2(BIF_ALIST_2)
+int
+erts_set_group_leader(Process *proc, Eterm new_gl)
{
- Process* new_member;
+
+ erts_aint32_t state;
- if (is_not_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ ASSERT(is_pid(new_gl));
+
+ state = erts_atomic32_read_nob(&proc->state);
+
+ if (state & ERTS_PSFLG_EXITING)
+ return 0;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(proc));
+
+ if (!(state & ERTS_PSFLG_DIRTY_RUNNING))
+ proc->group_leader = STORE_NC_IN_PROC(proc, new_gl);
+ else {
+ ErlHeapFragment *bp;
+ Eterm *hp;
+ /*
+ * Currently executing on a dirty scheduler,
+ * so we are not allowed to write to its heap.
+ * Store group leader pid in heap fragment.
+ */
+ bp = new_message_buffer(NC_HEAP_SIZE(new_gl));
+ hp = bp->mem;
+ proc->group_leader = STORE_NC(&hp,
+ &proc->off_heap,
+ new_gl);
+ bp->next = proc->mbuf;
+ proc->mbuf = bp;
+ proc->mbuf_sz += bp->used_size;
+ }
+
+ return !0;
+}
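
The dirty-scheduler branch of erts_set_group_leader() above cannot write to the target's heap, so it copies the new group-leader term into a freshly allocated heap fragment and chains that fragment onto the process's mbuf list. A simplified sketch of the chaining; all types and names here are illustrative stand-ins, not ERTS definitions:

    #include <stdio.h>
    #include <stdlib.h>

    struct frag { struct frag *next; size_t used; unsigned long mem[4]; };
    struct proc { struct frag *mbuf; size_t mbuf_sz; unsigned long *leader; };

    static void set_leader_off_heap(struct proc *p, unsigned long gl_term)
    {
        struct frag *bp = malloc(sizeof *bp); /* new_message_buffer() */
        bp->mem[0] = gl_term;                 /* STORE_NC() copies here */
        bp->used = 1;
        bp->next = p->mbuf;                   /* chain in front of mbuf */
        p->mbuf = bp;
        p->mbuf_sz += bp->used;
        p->leader = &bp->mem[0];              /* term now lives off-heap */
    }

    int main(void)
    {
        struct proc p = {0};
        set_leader_off_heap(&p, 42);
        printf("off-heap words: %zu, leader term: %lu\n",
               p.mbuf_sz, *p.leader);
        free(p.mbuf);
        return 0;
    }
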
+
+BIF_RETTYPE erts_internal_group_leader_3(BIF_ALIST_3)
+{
+ if (is_not_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ if (is_not_internal_pid(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+ if (is_not_internal_ref(BIF_ARG_3))
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_proc_sig_send_group_leader(BIF_P,
+ BIF_ARG_2,
+ BIF_ARG_1,
+ BIF_ARG_3);
+ BIF_RET(am_ok);
+}
+
+BIF_RETTYPE erts_internal_group_leader_2(BIF_ALIST_2)
+{
+ if (is_not_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ if (is_internal_pid(BIF_ARG_2)) {
+ Process *rp;
+ int res;
+
+ if (BIF_ARG_2 == BIF_P->common.id)
+ rp = BIF_P;
+ else {
+ rp = erts_try_lock_sig_free_proc(BIF_ARG_2,
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
+ if (!rp)
+ BIF_RET(am_badarg);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ BIF_RET(am_false);
+ }
+
+ res = erts_set_group_leader(rp, BIF_ARG_1);
+
+ if (rp != BIF_P)
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(res ? am_true : am_badarg);
}
if (is_external_pid(BIF_ARG_2)) {
@@ -4266,95 +4284,30 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
int code;
ErtsDSigData dsd;
dep = external_pid_dist_entry(BIF_ARG_2);
+ ERTS_ASSERT(dep);
if(dep == erts_this_dist_entry)
BIF_ERROR(BIF_P, BADARG);
- code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 1);
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
- BIF_RET(am_true);
case ERTS_DSIG_PREP_NOT_CONNECTED:
- BIF_TRAP2(dgroup_leader_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ BIF_RET(am_true);
+ case ERTS_DSIG_PREP_PENDING:
case ERTS_DSIG_PREP_CONNECTED:
code = erts_dsig_send_group_leader(&dsd, BIF_ARG_1, BIF_ARG_2);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
default:
- ASSERT(! "Invalid dsig prepare result");
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ ERTS_ASSERT(! "Invalid dsig prepare result");
}
}
- else if (is_internal_pid(BIF_ARG_2)) {
- int await_x;
- ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS;
- new_member = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2, locks);
- if (!new_member)
- BIF_ERROR(BIF_P, BADARG);
- if (new_member == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_group_leader_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
-
- await_x = (new_member != BIF_P
- && ERTS_PROC_PENDING_EXIT(new_member));
- if (!await_x) {
- if (is_immed(BIF_ARG_1))
- new_member->group_leader = BIF_ARG_1;
- else {
- locks &= ~ERTS_PROC_LOCK_STATUS;
- erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
- if (new_member == BIF_P
- || !(erts_smp_atomic32_read_nob(&new_member->state)
- & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
- new_member->group_leader = STORE_NC_IN_PROC(new_member,
- BIF_ARG_1);
- }
- else {
- ErlHeapFragment *bp;
- Eterm *hp;
- /*
- * Other process executing on a dirty scheduler,
- * so we are not allowed to write to its heap.
- * Store in heap fragment.
- */
-
- bp = new_message_buffer(NC_HEAP_SIZE(BIF_ARG_1));
- hp = bp->mem;
- new_member->group_leader = STORE_NC(&hp,
- &new_member->off_heap,
- BIF_ARG_1);
- bp->next = new_member->mbuf;
- new_member->mbuf = bp;
- new_member->mbuf_sz += bp->used_size;
- }
- }
- }
-
- if (new_member == BIF_P)
- locks &= ~ERTS_PROC_LOCK_MAIN;
- if (locks)
- erts_smp_proc_unlock(new_member, locks);
-
- if (await_x) {
- /* Wait for new_member to terminate; then badarg */
- Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
- ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
- BIF_ARG_2,
- am_erlang,
- am_group_leader,
- args,
- 2);
- }
-
- BIF_RET(am_true);
- }
- else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ BIF_RET(am_badarg);
}
-
+
BIF_RETTYPE system_flag_2(BIF_ALIST_2)
{
Sint n;
@@ -4362,57 +4315,43 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
if (BIF_ARG_1 == am_multi_scheduling) {
if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock
|| BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
int block = (BIF_ARG_2 == am_block
|| BIF_ARG_2 == am_block_normal);
int normal = (BIF_ARG_2 == am_block_normal
|| BIF_ARG_2 == am_unblock_normal);
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else {
- switch (erts_block_multi_scheduling(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- block,
- normal,
- 0)) {
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- BIF_RET(am_blocked);
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- BIF_RET(am_blocked_normal);
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_DONE:
- BIF_RET(am_enabled);
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- ERTS_VBUMP_ALL_REDS(BIF_P);
- BIF_TRAP2(bif_export[BIF_system_flag_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
- am_multi_scheduling);
- case ERTS_SCHDLR_SSPND_EINVAL:
- goto error;
- default:
- ASSERT(0);
- BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
- break;
- }
- }
-#endif
+ switch (erts_block_multi_scheduling(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ block,
+ normal,
+ 0)) {
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ BIF_RET(am_blocked);
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ BIF_RET(am_blocked_normal);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(am_enabled);
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
}
} else if (BIF_ARG_1 == am_schedulers_online) {
-#ifndef ERTS_SMP
- if (BIF_ARG_2 != make_small(1))
- goto error;
- else
- BIF_RET(make_small(1));
-#else
Sint old_no;
if (!is_small(BIF_ARG_2))
goto error;
@@ -4436,7 +4375,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
} else if (BIF_ARG_1 == am_fullsweep_after) {
Uint16 nval;
Uint oval;
@@ -4444,7 +4382,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
- oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs,
+ oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs,
(erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
@@ -4454,13 +4392,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
H_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_bin_vheap_size) {
@@ -4470,13 +4408,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_max_heap_size) {
@@ -4494,14 +4432,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
hp = HAlloc(BIF_P, sz);
old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
H_MAX_SIZE = max_heap_size;
H_MAX_FLAGS = max_heap_flags;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_value);
} else if (BIF_ARG_1 == am_display_items) {
@@ -4554,9 +4492,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_RET(ret);
} else if (BIF_ARG_1 == make_small(1)) {
int i, max;
- ErtsMessage* mp;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
@@ -4569,36 +4506,19 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
#endif
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
- mp = p->msg.first;
- while(mp != NULL) {
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_TOKEN(mp) = (ERL_MESSAGE_DT_UTAG(mp) != NIL) ? am_have_dt_utag : NIL;
-#else
- ERL_MESSAGE_TOKEN(mp) = NIL;
-#endif
- mp = mp->next;
- }
+
+ erts_proc_sig_clear_seq_trace_tokens(p);
}
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_scheduler_wall_time) {
- if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
- erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0;
- erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time,
- new);
- Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new);
- ASSERT(is_value(ref));
- BIF_TRAP2(await_sched_wall_time_mod_trap,
- BIF_P,
- ref,
- old ? am_true : am_false);
- }
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)
+ BIF_TRAP1(system_flag_scheduler_wall_time_trap,
+ BIF_P, BIF_ARG_2);
} else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
Sint old_no;
if (!is_small(BIF_ARG_2))
@@ -4624,13 +4544,12 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
} else if (BIF_ARG_1 == am_time_offset
&& ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) {
ErtsTimeOffsetState res;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
res = erts_finalize_time_offset();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
switch (res) {
case ERTS_TIME_OFFSET_PRELIMINARY: {
DECL_AM(preliminary);
@@ -4652,7 +4571,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
Eterm threads;
if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE;
- erts_aint32_t old = erts_smp_atomic32_xchg_nob(&msacc, new);
+ erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new);
Eterm ref = erts_msacc_request(BIF_P, new, &threads);
if (is_non_value(ref))
BIF_RET(old ? am_true : am_false);
@@ -4663,7 +4582,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
threads);
} else if (BIF_ARG_2 == am_reset) {
Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads);
- erts_aint32_t old = erts_smp_atomic32_read_nob(&msacc);
+ erts_aint32_t old = erts_atomic32_read_nob(&msacc);
ASSERT(is_value(ref));
BIF_TRAP3(await_msacc_mod_trap,
BIF_P,
@@ -4682,9 +4601,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
what = ERTS_SCHED_STAT_MODIFY_CLEAR;
else
goto error;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_sched_stat_modify(what);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) {
Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2);
@@ -4706,32 +4625,26 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
"scheduled for removal in Erlang/OTP 18. For more\n"
"information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
+ } else if (ERTS_IS_ATOM_STR("erts_alloc", BIF_ARG_1)) {
+ return erts_alloc_set_dyn_param(BIF_P, BIF_ARG_2);
}
error:
BIF_ERROR(BIF_P, BADARG);
}
-/**********************************************************************/
-
-BIF_RETTYPE hash_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_scheduler_wall_time_1(BIF_ALIST_1)
{
- Uint32 hash;
- Sint range;
-
- if (is_not_small(BIF_ARG_2)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
- BIF_ERROR(BIF_P, BADARG);
- }
-#if defined(ARCH_64)
- if (range > ((1L << 27) - 1))
- BIF_ERROR(BIF_P, BADARG);
-#endif
- hash = make_broken_hash(BIF_ARG_1);
- BIF_RET(make_small(1 + (hash % range))); /* [1..range] */
+ erts_aint32_t new = BIF_ARG_1 == am_true ? 1 : 0;
+ erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time,
+ new);
+ Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0);
+ ASSERT(is_value(ref));
+ BIF_TRAP2(await_sched_wall_time_mod_trap,
+ BIF_P, ref, old ? am_true : am_false);
}
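
The new erts_internal_scheduler_wall_time_1 flips the global flag with an atomic exchange so the previous setting can be reported to the caller in one step. A minimal C11 sketch of that exchange-and-report pattern (sched_wall_time here is a plain atomic_int, not the ERTS counter):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int sched_wall_time;

    /* Enable/disable and return the previous setting atomically. */
    static int set_swt(int enable)
    {
        return atomic_exchange(&sched_wall_time, enable);
    }

    int main(void)
    {
        printf("was %d\n", set_swt(1));  /* 0: was disabled */
        printf("was %d\n", set_swt(0));  /* 1: was enabled  */
        return 0;
    }
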
+/**********************************************************************/
+
BIF_RETTYPE phash_2(BIF_ALIST_2)
{
Uint32 hash;
@@ -4841,7 +4754,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2)
Eterm res = BIF_ARG_1;
switch (BIF_ARG_2) {
-#ifdef ERTS_SMP
case am_multi_scheduling: {
int msb = erts_is_multi_scheduling_blocked();
if (msb > 0)
@@ -4852,114 +4764,31 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2)
ERTS_INTERNAL_ERROR("Unexpected multi scheduling block state");
break;
}
-#endif
default:
break;
}
BIF_RET(res);
}
-/*
- * NOTE: The erts_bif_prep_await_proc_exit_*() functions are
- * tightly coupled with the implementation of erlang:await_proc_exit/3.
- * The erts_bif_prep_await_proc_exit_*() functions can safely call
- * skip_current_msgq() since they know that erlang:await_proc_exit/3
- * unconditionally will do a monitor and then unconditionally will
- * wait for the corresponding 'DOWN' message in a receive, and no other
- * receive is done before this receive. This optimization removes an
- * unnecessary scan of the currently existing message queue (which
- * can be large). If the erlang:await_proc_exit/3 implementation
- * is changed so that the above isn't true, nasty bugs in later
- * receives, etc, may appear.
- */
-
-static ERTS_INLINE int
-skip_current_msgq(Process *c_p)
-{
- int res;
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
- erts_proc_lc_chk_only_proc_main(c_p);
-#endif
-
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- if (ERTS_PROC_PENDING_EXIT(c_p)) {
- KILL_CATCHES(c_p);
- c_p->freason = EXC_EXIT;
- res = 0;
- }
- else {
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- c_p->msg.save = c_p->msg.last;
- res = 1;
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- return res;
-}
-
-void
-erts_bif_prep_await_proc_exit_data_trap(Process *c_p, Eterm pid, Eterm ret)
-{
- if (skip_current_msgq(c_p)) {
- ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_data, ret);
- }
-}
-
-void
-erts_bif_prep_await_proc_exit_reason_trap(Process *c_p, Eterm pid)
-{
- if (skip_current_msgq(c_p)) {
- ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p,
- pid, am_reason, am_undefined);
- }
-}
-
-void
-erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
- Eterm pid,
- Eterm module,
- Eterm function,
- Eterm args[],
- int nargs)
-{
- ASSERT(is_atom(module) && is_atom(function));
- if (skip_current_msgq(c_p)) {
- Eterm term;
- Eterm *hp;
- int i;
-
- hp = HAlloc(c_p, 4+2*nargs);
- term = NIL;
- for (i = nargs-1; i >= 0; i--) {
- term = CONS(hp, args[i], term);
- hp += 2;
- }
- term = TUPLE3(hp, module, function, term);
- ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_apply, term);
- }
-}
-
Export bif_return_trap_export;
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(BIF_ALIST_0))
+ Eterm (*bif)(BIF_ALIST))
{
int i;
sys_memset((void *) ep, 0, sizeof(Export));
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->addressv[i] = &ep->code[3];
+ ep->addressv[i] = ep->beam;
}
- ep->code[0] = m;
- ep->code[1] = f;
- ep->code[2] = a;
- ep->code[3] = (BeamInstr) em_apply_bif;
- ep->code[4] = (BeamInstr) bif;
+ ep->info.mfa.module = m;
+ ep->info.mfa.function = f;
+ ep->info.mfa.arity = a;
+ ep->beam[0] = BeamOpCodeAddr(op_apply_bif);
+ ep->beam[1] = (BeamInstr) bif;
}
void erts_init_bif(void)
{
- erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
-
/*
* bif_return_trap/2 is a hidden BIF that bifs that need to
* yield the calling process traps to.
@@ -4976,6 +4805,9 @@ void erts_init_bif(void)
am_erts_internal, am_dsend_continue_trap, 1,
dsend_continue_trap_1);
+ erts_init_trap_export(&await_exit_trap, am_erts_internal,
+ am_await_exit, 0, erts_internal_await_exit_trap);
+
flush_monitor_messages_trap = erts_export_put(am_erts_internal,
am_flush_monitor_messages,
3);
@@ -4990,18 +4822,315 @@ void erts_init_bif(void)
erts_format_cpu_topology_trap = erts_export_put(am_erlang,
am_format_cpu_topology,
1);
- await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
await_port_send_result_trap
= erts_export_put(am_erts_internal, am_await_port_send_result, 3);
+ system_flag_scheduler_wall_time_trap
+ = erts_export_put(am_erts_internal, am_system_flag_scheduler_wall_time, 1);
await_sched_wall_time_mod_trap
- = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
+ = erts_export_put(am_erts_internal, am_await_sched_wall_time_modifications, 2);
await_msacc_mod_trap
= erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3);
- erts_smp_atomic32_init_nob(&sched_wall_time, 0);
- erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
+ erts_atomic32_init_nob(&sched_wall_time, 0);
+ erts_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
+}
+
+/*
+ * Scheduling of BIFs via NifExport...
+ */
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
+
+#define ERTS_SCHED_BIF_TRAP_MARKER ((void *) (UWord) 1)
+
+static ERTS_INLINE void
+schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ ErtsBifFunc dfunc, void *ifunc,
+ Eterm module, Eterm function,
+ int argc, Eterm *argv)
+{
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ (void) erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ mfa, pc, BeamOpCodeAddr(op_apply_bif),
+ dfunc, ifunc,
+ module, function,
+ argc, argv);
+}
+
+
+static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+ erts_nif_export_restore(BIF_P, nep, BIF_ARG_1);
+ BIF_RET(BIF_ARG_1);
+}
+
+static BIF_RETTYPE dirty_bif_trap(BIF_ALIST)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(BIF_P);
+
+ /*
+ * Arity and argument registers are already set
+ * correctly by the call to dirty_bif_trap()...
+ */
+
+ ASSERT(BIF_P->arity == nep->exp.info.mfa.arity);
+
+ erts_nif_export_restore(BIF_P, nep, THE_NON_VALUE);
+
+ BIF_P->i = (BeamInstr *) nep->func;
+ BIF_P->freason = TRAP;
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2)
+{
+ Eterm freason;
+
+ ASSERT(is_small(BIF_ARG_1));
+
+ freason = signed_val(BIF_ARG_1);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ freason |= EXF_RESTORE_NIF;
+
+ BIF_P->fvalue = BIF_ARG_2;
+
+ BIF_ERROR(BIF_P, freason);
}
+
+static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc bif,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc)
+{
+ Process *c_p, *dirty_shadow_proc;
+ ErtsCodeMFA *mfa;
+
+ if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ dirty_shadow_proc = proc;
+ c_p = proc->next;
+ ASSERT(c_p->common.id == dirty_shadow_proc->common.id);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else
+ {
+ dirty_shadow_proc = NULL;
+ c_p = proc;
+ }
+
+ if (!ERTS_PROC_IS_EXITING(c_p)) {
+ Export *exp;
+ BifFunction dbif, ibif;
+ BeamInstr *pc;
+
+ /*
+ * dbif - direct bif
+ * ibif - indirect bif
+ */
+
+ erts_aint32_t set, mask;
+ mask = (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC);
+ switch (sched_type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ set = ERTS_PSFLG_DIRTY_CPU_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ set = ERTS_PSFLG_DIRTY_IO_PROC;
+ dbif = bif;
+ ibif = NULL;
+ break;
+ case ERTS_SCHED_NORMAL:
+ default:
+ set = 0;
+ dbif = call_bif;
+ ibif = bif;
+ break;
+ }
+
+ (void) erts_atomic32_read_bset_nob(&c_p->state, mask, set);
+
+ if (i == NULL) {
+ ERTS_INTERNAL_ERROR("Missing instruction pointer");
+ }
+#ifdef HIPE
+ else if (proc->flags & F_HIPE_MODE) {
+ /* Pointer to bif export in i */
+ exp = (Export *) i;
+ pc = c_p->cp;
+ mfa = &exp->info.mfa;
+ }
+#endif
+ else if (BeamIsOpCode(*i, op_call_bif_e)) {
+ /* Pointer to bif export in i+1 */
+ exp = (Export *) i[1];
+ pc = i;
+ mfa = &exp->info.mfa;
+ }
+ else if (BeamIsOpCode(*i, op_apply_bif)) {
+ /* Pointer to bif in i+1, and mfa in i-3 */
+ pc = c_p->cp;
+ mfa = erts_code_to_codemfa(i);
+ }
+ else {
+ ERTS_INTERNAL_ERROR("erts_schedule_bif() called "
+ "from unexpected instruction");
+ }
+ ASSERT(bif);
+
+ if (argc < 0) { /* reschedule original call */
+ mod = mfa->module;
+ func = mfa->function;
+ argc = (int) mfa->arity;
+ }
+
+ schedule(c_p, dirty_shadow_proc, mfa, pc, dbif, ibif,
+ mod, func, argc, argv);
+ }
+
+ if (dirty_shadow_proc)
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return THE_NON_VALUE;
+}
+
+static BIF_RETTYPE
+call_bif(Process *c_p, Eterm *reg, BeamInstr *I)
+{
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsBifFunc bif = (ErtsBifFunc) nep->func;
+ BIF_RETTYPE ret;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ ASSERT(bif);
+
+ ret = (*bif)(c_p, reg, I);
+
+ if (is_value(ret))
+ erts_nif_export_restore(c_p, nep, ret);
+ else if (c_p->freason != TRAP)
+ c_p->freason |= EXF_RESTORE_NIF; /* restore in handle_error() */
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* BIF did an ordinary trap... */
+ erts_nif_export_restore(c_p, nep, ret);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+
+ return ret;
+}
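
call_bif() above distinguishes its outcomes with a sentinel: it writes ERTS_SCHED_BIF_TRAP_MARKER into nep->func before the call, and if the marker is still there after an ordinary trap, the BIF did not reschedule itself via erts_schedule_bif() (which would have overwritten it). A toy sketch of that sentinel trick; the names are illustrative only:

    #include <stdio.h>

    #define TRAP_MARKER ((void *)1)

    struct nif_export { void *func; };
    static struct nif_export nep;

    static void plain_trap_bif(void)
    {
        /* An ordinary trap leaves nep.func untouched. */
    }

    static void rescheduling_bif(void)
    {
        static int new_entry;
        nep.func = &new_entry;  /* erts_schedule_bif() overwrites it */
    }

    static void call_bif(void (*bif)(void))
    {
        nep.func = TRAP_MARKER;
        bif();
        if (nep.func == TRAP_MARKER)
            puts("ordinary trap/return: restore the export");
        else
            puts("BIF rescheduled itself: keep the scheduled state");
    }

    int main(void)
    {
        call_bif(plain_trap_bif);
        call_bif(rescheduling_bif);
        return 0;
    }
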
+
+
+int
+erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
+{
+ BIF_RETTYPE result;
+ int exiting;
+ Process *dirty_shadow_proc;
+ ErtsBifFunc bf;
+ NifExport *nep;
+#ifdef DEBUG
+ Eterm *c_p_htop;
+ erts_aint32_t state;
+
+ ASSERT(!c_p->scheduler_data);
+ state = erts_atomic32_read_nob(&c_p->state);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
+
+#endif
+
+ nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
+
+ nep->func = ERTS_SCHED_BIF_TRAP_MARKER;
+
+ bf = (ErtsBifFunc) I[1];
+
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ dirty_shadow_proc->freason = c_p->freason;
+ dirty_shadow_proc->fvalue = c_p->fvalue;
+ dirty_shadow_proc->ftrace = c_p->ftrace;
+ dirty_shadow_proc->cp = c_p->cp;
+ dirty_shadow_proc->i = c_p->i;
+
+#ifdef DEBUG
+ c_p_htop = c_p->htop;
+#endif
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*bf)(dirty_shadow_proc, reg, I);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p_htop == c_p->htop);
+ ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(dirty_shadow_proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (is_value(result))
+ schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_result,
+ NULL, am_erts_internal, am_dirty_bif_result, 1, &result);
+ else if (dirty_shadow_proc->freason != TRAP) {
+ Eterm argv[2];
+ ASSERT(dirty_shadow_proc->freason <= MAX_SMALL);
+ argv[0] = make_small(dirty_shadow_proc->freason);
+ argv[1] = dirty_shadow_proc->fvalue;
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_exception, NULL, am_erts_internal,
+ am_dirty_bif_exception, 2, argv);
+ }
+ else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) {
+ /* Dirty BIF did an ordinary trap... */
+ ASSERT(!(erts_atomic32_read_nob(&c_p->state)
+ & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)));
+ schedule(c_p, dirty_shadow_proc, NULL, NULL,
+ dirty_bif_trap, (void *) dirty_shadow_proc->i,
+ am_erts_internal, am_dirty_bif_trap,
+ dirty_shadow_proc->arity, reg);
+ }
+ /* else:
+ * BIF rescheduled itself using erts_schedule_bif().
+ */
+ c_p->freason = dirty_shadow_proc->freason;
+ c_p->fvalue = dirty_shadow_proc->fvalue;
+ c_p->ftrace = dirty_shadow_proc->ftrace;
+ c_p->cp = dirty_shadow_proc->cp;
+ c_p->i = dirty_shadow_proc->i;
+ c_p->arity = dirty_shadow_proc->arity;
+ }
+
+ erts_flush_dirty_shadow_proc(dirty_shadow_proc);
+
+ return exiting;
+}
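
erts_call_dirty_bif() above never lets the dirty work touch the real process: the BIF runs against a shadow process, and only selected fields (freason, fvalue, ftrace, cp, i, arity) are merged back once the outcome is known. A much-reduced sketch of that copy/run/merge shape; the two-field struct is a stand-in, not the ERTS Process:

    #include <stdio.h>
    #include <string.h>

    struct proc { int freason; unsigned long fvalue; };

    static void dirty_bif(struct proc *shadow)
    {
        shadow->freason = 0;          /* normal return */
        shadow->fvalue = 42;
    }

    int main(void)
    {
        struct proc real = {0}, shadow;
        memcpy(&shadow, &real, sizeof real); /* make shadow proc */
        dirty_bif(&shadow);                  /* real proc untouched */
        real.freason = shadow.freason;       /* merge selected fields */
        real.fvalue  = shadow.fvalue;
        printf("fvalue=%lu\n", real.fvalue);
        return 0;
    }
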
+
+
+
#ifdef HARDDEBUG
/*
You'll need this line in bif.tab to be able to use this debug bif
@@ -5256,12 +5385,10 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
}
}
-#else
+#else
if (BIF_ARG_1 != am_true) {
BIF_ERROR(BIF_P,BADARG);
}
#endif
BIF_RET(am_true);
}
-
-
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 2203182a0d..cf9f61c0b8 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,17 +29,35 @@ extern Export *erts_convert_time_unit_trap;
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ALIST_4 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST Process* A__p, Eterm* BIF__ARGS, BeamInstr *A__I
+#define BIF_CALL_ARGS A__p, BIF__ARGS, A__I
+
+#define BIF_ALIST_0 BIF_ALIST
+#define BIF_ALIST_1 BIF_ALIST
+#define BIF_ALIST_2 BIF_ALIST
+#define BIF_ALIST_3 BIF_ALIST
+#define BIF_ALIST_4 BIF_ALIST
#define BIF_ARG_1 (BIF__ARGS[0])
#define BIF_ARG_2 (BIF__ARGS[1])
#define BIF_ARG_3 (BIF__ARGS[2])
#define BIF_ARG_4 (BIF__ARGS[3])
+#define BIF_I A__I
+
+/* NBIF_* is for bif calls from native code... */
+
+#define NBIF_ALIST Process* A__p, Eterm* BIF__ARGS
+#define NBIF_CALL_ARGS A__p, BIF__ARGS
+
+#define NBIF_ALIST_0 NBIF_ALIST
+#define NBIF_ALIST_1 NBIF_ALIST
+#define NBIF_ALIST_2 NBIF_ALIST
+#define NBIF_ALIST_3 NBIF_ALIST
+#define NBIF_ALIST_4 NBIF_ALIST
+
+typedef BIF_RETTYPE (*ErtsBifFunc)(BIF_ALIST);
+
#define ERTS_IS_PROC_OUT_OF_REDS(p) \
((p)->fcalls > 0 \
? 0 \
@@ -75,7 +93,7 @@ do { \
#define BUMP_REDS(p, gc) do { \
ASSERT(p); \
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\
(p)->fcalls -= (gc); \
if ((p)->fcalls < 0) { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
@@ -159,7 +177,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
return THE_NON_VALUE; \
} while (0)
@@ -167,7 +185,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
return THE_NON_VALUE; \
} while (0)
@@ -176,7 +194,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
return THE_NON_VALUE; \
@@ -186,7 +204,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -202,7 +220,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \
do { \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -210,7 +228,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -219,7 +237,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
(Ret) = THE_NON_VALUE; \
@@ -229,7 +247,7 @@ do { \
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
- (Proc)->current = (Bif)->code; \
+ (Proc)->current = &(Bif)->info.mfa; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
@@ -277,6 +295,19 @@ do { \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_TRAP4(Ret, Trap, Proc, A0, A1, A2, A3) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->arity = 4; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ reg[3] = (Eterm) (A3); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
@@ -288,7 +319,7 @@ do { \
(Proc)->freason = TRAP; \
} while (0)
-#define BIF_TRAP0(p, Trap_) do { \
+#define BIF_TRAP0(Trap_, p) do { \
(p)->arity = 0; \
(p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
@@ -325,6 +356,18 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define BIF_TRAP4(Trap_, p, A0, A1, A2, A3) do { \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
+ (p)->arity = 4; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ reg[2] = (A2); \
+ reg[3] = (A3); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
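
The new four-argument trap macros above all share one shape: spill the arguments into the x-register array, point the process's instruction pointer at the trap export, set freason to TRAP, and return the non-value so the emulator loop takes over. A generic sketch of that shape with plain C stand-ins:

    #include <stdio.h>

    #define THE_NON_VALUE 0UL
    #define TRAP 1

    struct proc { unsigned long x[4]; int arity; void *i; int freason; };

    static unsigned long trap4(struct proc *p, void *trap_addr,
                               unsigned long a0, unsigned long a1,
                               unsigned long a2, unsigned long a3)
    {
        p->arity = 4;                 /* number of live x registers */
        p->x[0] = a0; p->x[1] = a1; p->x[2] = a2; p->x[3] = a3;
        p->i = trap_addr;             /* where execution resumes */
        p->freason = TRAP;
        return THE_NON_VALUE;         /* tells the loop to dispatch */
    }

    int main(void)
    {
        struct proc p;
        static int trap_code;
        trap4(&p, &trap_code, 1, 2, 3, 4);
        printf("arity=%d freason=%d\n", p.arity, p.freason);
        return 0;
    }
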
+
#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
(p)->arity = 0; \
(p)->i = (BeamInstr*) (Code_); \
@@ -383,10 +426,16 @@ do { \
ERTS_BIF_PREP_TRAP3(RET, (TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_PREP_YIELD4(RET, TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP4(RET, (TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
#define ERTS_BIF_YIELD0(TRP, P) \
do { \
ERTS_VBUMP_ALL_REDS((P)); \
- BIF_TRAP0((TRP), (P)); \
+ BIF_TRAP0((TRP), (P)); \
} while (0)
#define ERTS_BIF_YIELD1(TRP, P, A0) \
@@ -407,10 +456,22 @@ do { \
BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_YIELD4(TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP4((TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
+#define ERTS_BIF_PREP_EXITED(RET, PROC) \
+do { \
+ KILL_CATCHES((PROC)); \
+ ERTS_BIF_PREP_ERROR((RET), (PROC), EXTAG_EXIT); \
+} while (0)
+
#define ERTS_BIF_EXITED(PROC) \
do { \
KILL_CATCHES((PROC)); \
- BIF_ERROR((PROC), EXC_EXIT); \
+ BIF_ERROR((PROC), EXTAG_EXIT); \
} while (0)
#define ERTS_BIF_CHK_EXITED(PROC) \
@@ -419,66 +480,40 @@ do { \
ERTS_BIF_EXITED((PROC)); \
} while (0)
-/*
- * The ERTS_BIF_*_AWAIT_X_*_TRAP makros either exits the caller, or
- * sets up a trap to erlang:await_proc_exit/3.
- *
- * The caller is acquired to hold the 'main' lock on C_P. No other locks
- * are allowed to be held.
- */
-
-#define ERTS_BIF_PREP_AWAIT_X_DATA_TRAP(RET, C_P, PID, DATA) \
-do { \
- erts_bif_prep_await_proc_exit_data_trap((C_P), (PID), (DATA)); \
- (RET) = THE_NON_VALUE; \
-} while (0)
-
-#define ERTS_BIF_PREP_AWAIT_X_REASON_TRAP(RET, C_P, PID) \
-do { \
- erts_bif_prep_await_proc_exit_reason_trap((C_P), (PID)); \
- (RET) = THE_NON_VALUE; \
-} while (0)
-
-#define ERTS_BIF_PREP_AWAIT_X_APPLY_TRAP(RET, C_P, PID, M, F, A, AN) \
-do { \
- erts_bif_prep_await_proc_exit_apply_trap((C_P), (PID), \
- (M), (F), (A), (AN)); \
- (RET) = THE_NON_VALUE; \
-} while (0)
-
-#define ERTS_BIF_AWAIT_X_DATA_TRAP(C_P, PID, DATA) \
-do { \
- erts_bif_prep_await_proc_exit_data_trap((C_P), (PID), (DATA)); \
- return THE_NON_VALUE; \
-} while (0)
-
-#define ERTS_BIF_AWAIT_X_REASON_TRAP(C_P, PID) \
-do { \
- erts_bif_prep_await_proc_exit_reason_trap((C_P), (PID)); \
- return THE_NON_VALUE; \
-} while (0)
-
-#define ERTS_BIF_AWAIT_X_APPLY_TRAP(C_P, PID, M, F, A, AN) \
-do { \
- erts_bif_prep_await_proc_exit_apply_trap((C_P), (PID), \
- (M), (F), (A), (AN)); \
- return THE_NON_VALUE; \
-} while (0)
+int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+
+BIF_RETTYPE
+erts_schedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type,
+ Eterm mod,
+ Eterm func,
+ int argc);
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE BIF_RETTYPE
+erts_reschedule_bif(Process *proc,
+ Eterm *argv,
+ BeamInstr *i,
+ ErtsBifFunc dbf,
+ ErtsSchedType sched_type)
+{
+ return erts_schedule_bif(proc, argv, i, dbf, sched_type,
+ THE_NON_VALUE, THE_NON_VALUE, -1);
+}
-void
-erts_bif_prep_await_proc_exit_data_trap(Process *c_p,
- Eterm pid,
- Eterm data);
-void
-erts_bif_prep_await_proc_exit_reason_trap(Process *c_p,
- Eterm pid);
-void
-erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
- Eterm pid,
- Eterm module,
- Eterm function,
- Eterm args[],
- int nargs);
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#ifdef ERL_WANT_HIPE_BIF_WRAPPER__
@@ -510,16 +545,16 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args); \
-BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \
- Eterm* args) \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST); \
+BIF_RETTYPE \
+nbif_impl_hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (NBIF_ALIST) \
{ \
BIF_RETTYPE res; \
- hipe_reserve_beam_trap_frame(c_p, args, ARITY); \
- res = BIF_NAME ## _ ## ARITY (c_p, args); \
- if (is_value(res) || c_p->freason != TRAP) { \
- hipe_unreserve_beam_trap_frame(c_p); \
+ hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, ARITY); \
+ res = nbif_impl_ ## BIF_NAME ## _ ## ARITY (NBIF_CALL_ARGS); \
+ if (is_value(res) || BIF_P->freason != TRAP) { \
+ hipe_unreserve_beam_trap_frame(BIF_P); \
} \
return res; \
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 80db4eb6ff..d4ba90a61a 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,22 +23,25 @@
#
# Lines starting with '#' are ignored.
#
-# <bif-decl> ::= "bif" <bif> <C-name>* | "ubif" <bif> <C-name>*
+# <bif-decl> ::= "bif" <bif> <C-name>* |
+# "ubif" <bif> <C-name>* |
+# "gcbif" <bif> <C-name>*
# <bif> ::= <module> ":" <name> "/" <arity>
#
-# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper,
-# or rather; the trace entry point in the export entry is the same
-# as the normal entry point, and no trace wrapper is generated.
+# ubif: Use for operators and guard BIFs that never build anything
+# on the heap (such as tuple_size/1).
#
-# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
+# gcbif: Use for guard BIFs that may build on the heap (such as abs/1).
+#
+# bif: Use for all other BIFs.
#
# Add new BIFs to the end of the file.
#
-# Note: Guards BIFs require special support in the compiler (to be able to actually
-# call them from within a guard).
+# Note: Guard BIFs usually require special support in the compiler.
#
-ubif erlang:abs/1
+
+gcbif erlang:abs/1
bif erlang:adler32/1
bif erlang:adler32/2
bif erlang:adler32_combine/3
@@ -60,18 +63,20 @@ bif erlang:erase/0
bif erlang:erase/1
bif erlang:exit/1
bif erlang:exit/2
+bif erlang:exit_signal/2
bif erlang:external_size/1
bif erlang:external_size/2
-ubif erlang:float/1
+gcbif erlang:float/1
bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
-bif erlang:garbage_collect/0
+bif erts_internal:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
bif erlang:group_leader/0
-bif erlang:group_leader/2
+bif erts_internal:group_leader/2
+bif erts_internal:group_leader/3
bif erlang:halt/2
bif erlang:phash/2
bif erlang:phash2/1
@@ -79,13 +84,15 @@ bif erlang:phash2/2
ubif erlang:hd/1
bif erlang:integer_to_list/1
bif erlang:is_alive/0
-ubif erlang:length/1
+gcbif erlang:length/1
bif erlang:link/1
bif erlang:list_to_atom/1
bif erlang:list_to_binary/1
bif erlang:list_to_float/1
bif erlang:list_to_integer/1
bif erlang:list_to_pid/1
+bif erlang:list_to_port/1
+bif erlang:list_to_ref/1
bif erlang:list_to_tuple/1
bif erlang:loaded/0
bif erlang:localtime/0
@@ -119,17 +126,17 @@ bif erlang:pid_to_list/1
bif erlang:ports/0
bif erlang:pre_loaded/0
bif erlang:process_flag/2
-bif erlang:process_flag/3
+bif erts_internal:process_flag/3
bif erlang:process_info/1
bif erlang:process_info/2
bif erlang:processes/0
bif erlang:put/2
bif erlang:register/2
bif erlang:registered/0
-ubif erlang:round/1
+gcbif erlang:round/1
ubif erlang:self/0
bif erlang:setelement/3
-ubif erlang:size/1
+gcbif erlang:size/1
bif erlang:spawn/3
bif erlang:spawn_link/3
bif erlang:split_binary/2
@@ -139,7 +146,7 @@ bif erlang:term_to_binary/2
bif erlang:throw/1
bif erlang:time/0
ubif erlang:tl/1
-ubif erlang:trunc/1
+gcbif erlang:trunc/1
bif erlang:tuple_to_list/1
bif erlang:universaltime/0
bif erlang:universaltime_to_localtime/1
@@ -148,8 +155,11 @@ bif erlang:unregister/1
bif erlang:whereis/1
bif erlang:spawn_opt/1
bif erlang:setnode/2
-bif erlang:setnode/3
-bif erlang:dist_exit/3
+bif erlang:dist_get_stat/1
+bif erlang:dist_ctrl_input_handler/2
+bif erlang:dist_ctrl_put_data/2
+bif erlang:dist_ctrl_get_data/1
+bif erlang:dist_ctrl_get_data_notification/1
# Static native functions in erts_internal
bif erts_internal:port_info/1
@@ -162,7 +172,7 @@ bif erts_internal:port_connect/2
bif erts_internal:request_system_task/3
bif erts_internal:request_system_task/4
-bif erts_internal:check_process_code/2
+bif erts_internal:check_process_code/1
bif erts_internal:map_to_tuple_keys/1
bif erts_internal:term_type/1
@@ -177,6 +187,12 @@ bif erts_internal:system_check/1
bif erts_internal:release_literal_area_switch/0
+bif erts_internal:scheduler_wall_time/1
+
+bif erts_internal:dirty_process_handle_signals/1
+
+bif erts_internal:create_dist_channel/4
+
# inet_db support
bif erlang:port_set_data/2
bif erlang:port_get_data/1
@@ -190,9 +206,9 @@ bif erlang:seq_trace/2
bif erlang:seq_trace_info/1
bif erlang:seq_trace_print/1
bif erlang:seq_trace_print/2
-bif erlang:suspend_process/2
+bif erts_internal:suspend_process/2
bif erlang:resume_process/1
-bif erlang:process_display/2
+bif erts_internal:process_display/2
bif erlang:bump_reductions/1
@@ -252,6 +268,7 @@ bif erlang:demonitor/1
bif erlang:demonitor/2
bif erlang:is_process_alive/1
+bif erts_internal:is_process_alive/2
bif erlang:error/1 error_1
bif erlang:error/2 error_2
@@ -322,11 +339,10 @@ bif erlang:match_spec_test/3
# Bifs in ets module.
#
-bif ets:all/0
+bif ets:internal_request_all/0
bif ets:new/2
bif ets:delete/1
bif ets:delete/2
-bif ets:delete_all_objects/1
bif ets:delete_object/2
bif ets:first/1
bif ets:is_compiled_ms/1
@@ -357,7 +373,7 @@ bif ets:select_count/2
bif ets:select_reverse/1
bif ets:select_reverse/2
bif ets:select_reverse/3
-bif ets:select_delete/2
+bif ets:select_replace/2
bif ets:match_spec_compile/1
bif ets:match_spec_run_r/3
@@ -365,9 +381,10 @@ bif ets:match_spec_run_r/3
# Bifs in os module.
#
-bif os:putenv/2
-bif os:getenv/0
-bif os:getenv/1
+bif os:get_env_var/1
+bif os:set_env_var/2
+bif os:unset_env_var/1
+bif os:list_env_vars/0
bif os:getpid/0
bif os:timestamp/0
bif os:system_time/0
@@ -389,6 +406,7 @@ bif erl_ddll:demonitor/1
#
# Bifs in the re module
#
+bif re:version/0
bif re:compile/1
bif re:compile/2
bif re:run/2
@@ -417,18 +435,17 @@ bif erts_debug:set_internal_state/2
bif erts_debug:display/1
bif erts_debug:dist_ext_to_term/2
bif erts_debug:instructions/0
-
-#
-# Monitor testing bif's...
-#
-bif erts_debug:dump_monitors/1
-bif erts_debug:dump_links/1
-
+bif erts_debug:dirty_cpu/2
+bif erts_debug:dirty_io/2
+bif erts_debug:dirty/3
#
# Lock counter bif's
#
-bif erts_debug:lock_counters/1
+bif erts_debug:lcnt_control/2
+bif erts_debug:lcnt_control/1
+bif erts_debug:lcnt_collect/0
+bif erts_debug:lcnt_clear/0
#
# New Bifs in R8.
@@ -452,8 +469,8 @@ bif error_logger:warning_map/0
bif erlang:get_module_info/1
bif erlang:get_module_info/2
ubif erlang:is_boolean/1
-bif string:to_integer/1
-bif string:to_float/1
+bif string:list_to_integer/1
+bif string:list_to_float/1
bif erlang:make_fun/3
bif erlang:iolist_size/1
bif erlang:iolist_to_binary/1
@@ -464,8 +481,8 @@ bif erlang:list_to_existing_atom/1
#
ubif erlang:is_bitstring/1
ubif erlang:tuple_size/1
-ubif erlang:byte_size/1
-ubif erlang:bit_size/1
+gcbif erlang:byte_size/1
+gcbif erlang:bit_size/1
bif erlang:list_to_bitstring/1
bif erlang:bitstring_to_list/1
@@ -517,8 +534,8 @@ bif erlang:binary_to_term/2
#
# The searching/splitting/substituting thingies
#
-ubif erlang:binary_part/2
-ubif erlang:binary_part/3
+gcbif erlang:binary_part/2
+gcbif erlang:binary_part/3
bif binary:compile_pattern/1
bif binary:match/2
@@ -532,9 +549,6 @@ bif binary:last/1
bif binary:at/2
bif binary:part/2 binary_binary_part_2
bif binary:part/3 binary_binary_part_3
-bif binary:bin_to_list/1
-bif binary:bin_to_list/2
-bif binary:bin_to_list/3
bif binary:list_to_bin/1
bif binary:copy/1
bif binary:copy/2
@@ -601,7 +615,6 @@ bif erlang:float_to_binary/2
bif erlang:binary_to_float/1
bif io:printable_range/0
-bif os:unsetenv/1
#
# New in 17.0
@@ -610,8 +623,7 @@ bif os:unsetenv/1
bif re:inspect/2
ubif erlang:is_map/1
-ubif erlang:map_size/1
-bif maps:to_list/1
+gcbif erlang:map_size/1
bif maps:find/2
bif maps:get/2
bif maps:from_list/1
@@ -657,7 +669,59 @@ bif erlang:has_prepared_code_on_load/1
bif maps:take/2
#
-# Obsolete
+# New in 20.0
+#
+
+gcbif erlang:floor/1
+gcbif erlang:ceil/1
+bif math:floor/1
+bif math:ceil/1
+bif math:fmod/2
+bif os:set_signal/2
+
+#
+# New in 20.1
+#
+bif erlang:iolist_to_iovec/1
+
+#
+# New in 21.0
#
-bif erlang:hash/2
+bif erts_internal:get_dflags/0
+bif erts_internal:new_connection/1
+bif erts_internal:abort_connection/2
+bif erts_internal:map_next/3
+bif ets:whereis/1
+bif erts_internal:gather_alloc_histograms/1
+bif erts_internal:gather_carrier_info/1
+ubif erlang:map_get/2
+ubif erlang:is_map_key/2
+bif ets:internal_delete_all/2
+bif ets:internal_select_delete/2
+
+#
+# New in 21.2
+#
+
+bif persistent_term:put/2
+bif persistent_term:get/1
+bif persistent_term:get/0
+bif persistent_term:erase/1
+bif persistent_term:info/0
+bif erts_internal:erase_persistent_terms/0
+
+bif erts_internal:atomics_new/2
+bif atomics:get/2
+bif atomics:put/3
+bif atomics:add/3
+bif atomics:add_get/3
+bif atomics:exchange/3
+bif atomics:compare_exchange/4
+bif atomics:info/1
+
+bif erts_internal:counters_new/1
+bif erts_internal:counters_get/2
+bif erts_internal:counters_add/3
+bif erts_internal:counters_put/3
+bif erts_internal:counters_info/1
diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab
new file mode 100644
index 0000000000..00854471a9
--- /dev/null
+++ b/erts/emulator/beam/bif_instrs.tab
@@ -0,0 +1,547 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017-2018. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// ================================================================
+// All guards with zero arguments have special instructions,
+// for example:
+//
+// self/0
+// node/0
+//
+// All other guard BIFs take one or two arguments.
+// ================================================================
+
+CALL_GUARD_BIF(BF, TmpReg, Dst) {
+ Eterm result;
+
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*$BF)(c_p, $TmpReg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $Dst = result;
+ $NEXT0();
+ }
+}
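+
+// A minimal sketch (not generated output) of an instruction body after
+// $CALL_GUARD_BIF has been expanded inline. The macro transfers control
+// only on success; on failure execution falls through to whatever
+// follows the macro invocation, typically a $FAIL:
+//
+//    result = (*bf)(c_p, tmp_reg, I);
+//    if (ERTS_LIKELY(is_value(result))) {
+//        dst = result;           /* $Dst = result; */
+//        goto next_instruction;  /* $NEXT0();      */
+//    }
+//    /* fall through to the $FAIL($Fail) after the macro */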
+
+// Guard BIF in head. On failure, ignore the error and jump
+// to the code for the next clause. We don't support tracing
+// of guard BIFs.
+
+bif1(Fail, Bif, Src, Dst) {
+ ErtsBifFunc bf;
+ Eterm tmp_reg[1];
+
+ tmp_reg[0] = $Src;
+ bf = (BifFunction) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+
+ $FAIL($Fail);
+}
+
+//
+// Guard BIF in body. It can fail like any BIF. No trace support.
+//
+
+bif1_body(Bif, Src, Dst) {
+ ErtsBifFunc bf;
+ Eterm tmp_reg[1];
+
+ tmp_reg[0] = $Src;
+ bf = (BifFunction) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+
+ reg[0] = tmp_reg[0];
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Guard BIF in guard with two arguments ('and'/2, 'or'/2, 'xor'/2).
+//
+
+i_bif2(Fail, Bif, Src1, Src2, Dst) {
+ Eterm tmp_reg[2];
+ ErtsBifFunc bf;
+
+ tmp_reg[0] = $Src1;
+ tmp_reg[1] = $Src2;
+ bf = (ErtsBifFunc) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+ $FAIL($Fail);
+}
+
+//
+// Guard BIF in body with two arguments ('and'/2, 'or'/2, 'xor'/2).
+//
+
+i_bif2_body(Bif, Src1, Src2, Dst) {
+ Eterm tmp_reg[2];
+ ErtsBifFunc bf;
+
+ tmp_reg[0] = $Src1;
+ tmp_reg[1] = $Src2;
+ bf = (ErtsBifFunc) $Bif;
+ $CALL_GUARD_BIF(bf, tmp_reg, $Dst);
+ reg[0] = tmp_reg[0];
+ reg[1] = tmp_reg[1];
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, ubif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Garbage-collecting BIF with one argument in either guard or body.
+//
+
+i_gc_bif1(Fail, Bif, Src, Live, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ x(live) = $Src;
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+ if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ x(0) = x(live);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
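+
+// The convention above means a GC BIF receives its argument in an
+// x register at index 'live', with registers 0..live-1 holding terms
+// that must survive a collection. A hypothetical one-argument GC BIF
+// (the name is illustrative only) would thus have this shape:
+//
+//    Eterm my_gc_bif_1(Process* p, Eterm* reg, Uint live)
+//    {
+//        Eterm arg = reg[live];  /* stored there by i_gc_bif1 */
+//        /* may call erts_garbage_collect(p, need, reg, live+1)
+//         * so that reg[0..live], including 'arg', survive */
+//        ...
+//    }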
+
+//
+// Garbage-collecting BIF with two arguments in either guard or body.
+//
+
+i_gc_bif2(Fail, Bif, Live, Src1, Src2, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ /*
+ * XXX This calling convention does not make sense. 'live'
+ * should point at the first argument, not the second
+ * (i.e. 'live' should not be incremented below).
+ */
+ x(live) = $Src1;
+ x(live+1) = $Src2;
+ live++;
+
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+
+ if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ live--;
+ x(0) = x(live);
+ x(1) = x(live+1);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// Garbage-collecting BIF with three arguments in either guard or body.
+//
+
+i_gc_bif3(Fail, Bif, Live, Src2, Src3, Dst) {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm result;
+ Uint live = (Uint) $Live;
+
+ /*
+ * XXX This calling convention does not make sense. 'live'
+ * should point out the first argument, not the third
+ * (i.e. 'live' should not be incremented below).
+ */
+ x(live) = x(SCRATCH_X_REG);
+ x(live+1) = $Src2;
+ x(live+2) = $Src3;
+ live += 2;
+
+ bf = (GcBifFunction) $Bif;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(result))) {
+ $REFRESH_GEN_DEST();
+ $Dst = result;
+ $NEXT0();
+ }
+
+ /* Handle error in guard. */
+ if (ERTS_LIKELY($Fail != 0)) {
+ $JUMP($Fail);
+ }
+
+ /* Handle error in body. */
+ live -= 2;
+ x(0) = x(live);
+ x(1) = x(live+1);
+ x(2) = x(live+2);
+ I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
+ goto post_error_handling;
+}
+
+//
+// The most general BIF call. The BIF may build any amount of data
+// on the heap. The result is always returned in r(0).
+//
+call_bif(Exp) {
+ ErtsBifFunc bf;
+ Eterm result;
+ ErlHeapFragment *live_hf_end;
+ Export *export = (Export*) $Exp;
+
+ if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the BIF. */
+ c_p->arity = GET_BIF_ARITY(export);
+ c_p->current = &export->info.mfa;
+ goto context_switch3;
+ }
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export),
+ GET_BIF_ADDRESS(export));
+
+ bf = GET_BIF_ADDRESS(export);
+
+ PRE_BIF_SWAPOUT(c_p);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, export);
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ result = (*bf)(c_p, reg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ Uint arity = GET_BIF_ARITY(export);
+ result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result,
+ reg, arity);
+ E = c_p->stop;
+ }
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ HTOP = HEAP_TOP(c_p);
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ /* We have to update the cache if msacc is enabled in order
+ to make sure no bookkeeping is done after we have disabled
+ msacc. We don't always do this, as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
+ ERTS_MSACC_UPDATE_CACHE_X();
+ }
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (ERTS_LIKELY(is_value(result))) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ $NEXT0();
+ } else if (c_p->freason == TRAP) {
+ SET_CP(c_p, I+2);
+ SET_I(c_p->i);
+ SWAPIN;
+ Dispatch();
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ I = handle_error(c_p, I, reg, &export->info.mfa);
+ goto post_error_handling;
+}
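+
+// To summarize, call_bif can leave the instruction in three ways:
+//
+//    is_value(result)      -> result goes into r(0); continue with
+//                             the next instruction
+//    c_p->freason == TRAP  -> the BIF yielded or trapped into Erlang
+//                             code; continue at c_p->i, with I+2 as
+//                             the return address
+//    otherwise             -> an exception; enter handle_error() with
+//                             the BIF's MFA from the export entry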
+
+//
+// Send is almost a standard call-BIF with two arguments, except for:
+// 1. It cannot be traced.
+// 2. There is no pointer to the send_2 function stored in
+// the instruction.
+//
+
+send() {
+ Eterm result;
+
+ if (!(FCALLS > 0 || FCALLS > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the BIF. */
+ c_p->arity = 2;
+ c_p->current = NULL;
+ goto context_switch3;
+ }
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ result = erl_send(c_p, r(0), x(1));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ HTOP = HEAP_TOP(c_p);
+ FCALLS = c_p->fcalls;
+ if (ERTS_LIKELY(is_value(result))) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ } else if (c_p->freason == TRAP) {
+ SET_CP(c_p, I+1);
+ SET_I(c_p->i);
+ SWAPIN;
+ Dispatch();
+ } else {
+ goto find_func_info;
+ }
+}
+
+call_nif := nif_bif.call_nif.epilogue;
+apply_bif := nif_bif.apply_bif.epilogue;
+
+nif_bif.head() {
+ Eterm nif_bif_result;
+ Eterm bif_nif_arity;
+ BifFunction vbf;
+ ErlHeapFragment *live_hf_end;
+ ErtsCodeMFA *codemfa;
+}
+
+nif_bif.call_nif() {
+ /*
+ * call_nif is always the first instruction in a function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ *
+ * This layout is determined by the NifExport struct
+ */
+
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+
+ codemfa = erts_code_to_codemfa(I);
+
+ c_p->current = codemfa; /* current and vbf set to please handle_error */
+
+ DTRACE_NIF_ENTRY(c_p, codemfa);
+
+ HEAVY_SWAPOUT;
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = codemfa->arity;
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ {
+ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
+ NifF* fp = vbf = (NifF*) I[1];
+ struct enif_environment_t env;
+ ASSERT(c_p->scheduler_data);
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
+
+ ASSERT((c_p->scheduler_data)->current_nif == NULL);
+ (c_p->scheduler_data)->current_nif = &env;
+
+ nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
+ if (env.exception_thrown)
+ nif_bif_result = THE_NON_VALUE;
+
+ ASSERT((c_p->scheduler_data)->current_nif == &env);
+ (c_p->scheduler_data)->current_nif = NULL;
+
+ erts_post_nif(&env);
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ ASSERT(!env.exiting);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+
+ DTRACE_NIF_RETURN(c_p, codemfa);
+}
+
+nif_bif.apply_bif() {
+ /*
+ * At this point, I points to the code[0] in the export entry for
+ * the BIF:
+ *
+ * code[-3]: Module
+ * code[-2]: Function
+ * code[-1]: Arity
+ * code[0]: &&apply_bif
+ * code[1]: Function pointer to BIF function
+ */
+
+ if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the BIF. */
+ goto context_switch;
+ }
+
+ codemfa = erts_code_to_codemfa(I);
+
+ ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0));
+
+
+ /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->current = codemfa;
+ $SET_CP_I_ABS(I); /* In case we apply check_process_code/2. */
+ c_p->arity = 0; /* To allow garbage collection on ourselves
+ * (check_process_code/2).
+ */
+ DTRACE_BIF_ENTRY(c_p, codemfa);
+
+ SWAPOUT;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
+ c_p->fcalls = FCALLS - 1;
+ vbf = (BifFunction) Arg(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = codemfa->arity;
+ ASSERT(bif_nif_arity <= 4);
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ {
+ ErtsBifFunc bf = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ live_hf_end = c_p->mbuf;
+ ERTS_CHK_MBUF_SZ(c_p);
+ nif_bif_result = (*bf)(c_p, reg, I);
+ ERTS_CHK_MBUF_SZ(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ /* We have to update the cache if msacc is enabled in order
+ to make sure no bookkeeping is done after we have disabled
+ msacc. We don't always do this, as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X())
+ ERTS_MSACC_UPDATE_CACHE_X();
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ DTRACE_BIF_RETURN(c_p, codemfa);
+}
+
+nif_bif.epilogue() {
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end,
+ nif_bif_result,
+ reg, bif_nif_arity);
+ }
+ SWAPIN; /* There might have been a garbage collection. */
+ FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ if (ERTS_LIKELY(is_value(nif_bif_result))) {
+ r(0) = nif_bif_result;
+ CHECK_TERM(r(0));
+ SET_I(c_p->cp);
+ c_p->cp = 0;
+ Goto(*I);
+ } else if (c_p->freason == TRAP) {
+ SET_I(c_p->i);
+ if (c_p->flags & F_HIBERNATE_SCHED) {
+ c_p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ }
+ Dispatch();
+ }
+ I = handle_error(c_p, c_p->cp, reg, c_p->current);
+ goto post_error_handling;
+}
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 4baee7900b..84338769e0 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1293,8 +1293,11 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c ^ *y++;
x++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
}
else {
ErtsDigit b1, b2;
@@ -1312,7 +1315,9 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
x++; y++;
}
while(xl--) {
- *r++ = *x++;
+ DSUBb(*x,0,b1,c1);
+ *r++ = c1;
+ x++;
}
}
}
@@ -2266,21 +2271,6 @@ Eterm big_minus(Eterm x, Eterm y, Eterm *r)
}
/*
-** Subtract a digit from big number
-*/
-Eterm big_minus_small(Eterm x, Eterm y, Eterm *r)
-{
- Eterm* xp = big_val(x);
-
- if (BIG_SIGN(xp))
- return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
- else
- return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
- (short) BIG_SIGN(xp));
-}
-
-/*
** Multiply smallnums
*/
@@ -2412,16 +2402,6 @@ Eterm big_rem(Eterm x, Eterm y, Eterm *r)
}
}
-Eterm big_neg(Eterm x, Eterm *r)
-{
- Eterm* xp = big_val(x);
- dsize_t xsz = BIG_SIZE(xp);
- short xsgn = BIG_SIGN(xp);
-
- MOVE_DIGITS(BIG_V(r), BIG_V(xp), xsz);
- return big_norm(r, xsz, (short) !xsgn);
-}
-
Eterm big_band(Eterm x, Eterm y, Eterm *r)
{
Eterm* xp = big_val(x);
@@ -2569,12 +2549,17 @@ int term_equals_2pow32(Eterm x)
}
}
+static ERTS_INLINE int c2int_is_valid_char(byte ch, int base) {
+ if (base <= 10)
+ return (ch >= '0' && ch < ('0' + base));
+ else
+ return (ch >= '0' && ch <= '9')
+ || (ch >= 'A' && ch < ('A' + base - 10))
+ || (ch >= 'a' && ch < ('a' + base - 10));
+}
+
static ERTS_INLINE int c2int_is_invalid_char(byte ch, int base) {
- return (ch < '0'
- || (ch > ('0' + base - 1)
- && !(base > 10
- && ((ch >= 'a' && ch < ('a' + base - 10))
- || (ch >= 'A' && ch < ('A' + base - 10))))));
+ return !c2int_is_valid_char(ch, base);
}
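+
+/* For example, with base == 16 c2int_is_valid_char() accepts '0'..'9',
+ * 'A'..'F' and 'a'..'f' (the exclusive upper bounds being 'A' + 6 and
+ * 'a' + 6), while base == 2 accepts only '0' and '1'. */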
static ERTS_INLINE byte c2int_digit_from_base(byte ch) {
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 4a96d971c3..7556205063 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -70,7 +70,20 @@ typedef Uint dsize_t; /* Vector size type */
/* Check for small */
#define IS_USMALL(sgn,x) ((sgn) ? ((x) <= MAX_SMALL+1) : ((x) <= MAX_SMALL))
-#define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL))
+
+/*
+ * It seems that both clang and gcc will generate sub-optimal code
+ * for the more obvious way to write the range check:
+ *
+ * #define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL))
+ *
+ * Note that IS_SSMALL() may be used in the 32-bit emulator with
+ * a Uint64 argument. Therefore, we must test the size of the argument
+ * to ensure that the cast does not discard the high-order 32 bits.
+ */
+#define _IS_SSMALL32(x) (((Uint32) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#define _IS_SSMALL64(x) (((Uint64) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
+#define IS_SSMALL(x) (sizeof(x) == sizeof(Uint32) ? _IS_SSMALL32(x) : _IS_SSMALL64(x))
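+
+/*
+ * Why the trick works (a sketch, assuming the usual arithmetic right
+ * shift of signed values): x fits in a small iff all bits from bit
+ * SMALL_BITS-1 upwards agree, i.e. (x >> (SMALL_BITS-1)) is 0 or -1,
+ * i.e. ((x >> (SMALL_BITS-1)) + 1) is 0 or 1 viewed as unsigned.
+ * With SMALL_BITS == 5 for illustration (smalls in -16..15):
+ *
+ *    x = 7:    7 >> 4 = 0,  0 + 1 = 1 < 2   => small
+ *    x = -8:  -8 >> 4 = -1, -1 + 1 = 0 < 2  => small
+ *    x = 16:  16 >> 4 = 1,  1 + 1 = 2       => not small
+ */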
/* The heap size needed for a bignum */
#define BIG_NEED_SIZE(x) ((x) + 1)
@@ -118,9 +131,7 @@ Eterm big_minus(Eterm, Eterm, Eterm*);
Eterm big_times(Eterm, Eterm, Eterm*);
Eterm big_div(Eterm, Eterm, Eterm*);
Eterm big_rem(Eterm, Eterm, Eterm*);
-Eterm big_neg(Eterm, Eterm*);
-Eterm big_minus_small(Eterm, Uint, Eterm*);
Eterm big_plus_small(Eterm, Uint, Eterm*);
Eterm big_times_small(Eterm, Uint, Eterm*);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 071a356260..6a349764b2 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -47,13 +47,8 @@ void
erts_init_binary(void)
{
/* Verify Binary alignment... */
- if ((((UWord) &((Binary *) 0)->orig_bytes[0]) % ((UWord) 8)) != 0) {
- /* I assume that any compiler should be able to optimize this
- away. If not, this test is not very expensive... */
- erts_exit(ERTS_ABORT_EXIT,
- "Internal error: Address of orig_bytes[0] of a Binary"
- " is *not* 8-byte aligned\n");
- }
+ ERTS_CT_ASSERT((offsetof(Binary,orig_bytes) % 8) == 0);
+ ERTS_CT_ASSERT((offsetof(ErtsMagicBinary,u.aligned.data) % 8) == 0);
erts_init_trap_export(&binary_to_list_continue_export,
am_erts_internal, am_binary_to_list_continue, 1,
@@ -65,14 +60,36 @@ erts_init_binary(void)
}
+static ERTS_INLINE
+Eterm build_proc_bin(ErlOffHeap* ohp, Eterm* hp, Binary* bptr)
+{
+ ProcBin* pb = (ProcBin *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = bptr->orig_size;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*)pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
+
+ return make_binary(pb);
+}
+
+/** @brief Initiate a ProcBin for a full Binary.
+ * @param hp must point to PROC_BIN_SIZE available heap words.
+ */
+Eterm erts_build_proc_bin(ErlOffHeap* ohp, Eterm* hp, Binary* bptr)
+{
+ return build_proc_bin(ohp, hp, bptr);
+}
+
/*
* Create a brand new binary from scratch.
*/
-
Eterm
new_binary(Process *p, byte *buf, Uint len)
{
- ProcBin* pb;
Binary* bptr;
if (len <= ERL_ONHEAP_BIN_LIMIT) {
@@ -89,65 +106,64 @@ new_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = len;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
+ return build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), bptr);
+}
+
+Eterm
+erts_heap_factory_new_binary(ErtsHeapFactory *hfact, byte *buf, Uint len,
+ Uint reserve_size)
+{
+ Eterm *hp;
+ Binary* bptr;
+
+ if (len <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+ hp = erts_produce_heap(hfact, heap_bin_size(len), reserve_size);
+ hb = (ErlHeapBin *) hp;
+ hb->thing_word = header_heap_bin(len);
+ hb->size = len;
+ if (buf != NULL) {
+ sys_memcpy(hb->data, buf, len);
+ }
+ return make_binary(hb);
+ }
/*
- * Miscellanous updates. Return the tagged binary.
+ * Allocate the binary struct itself.
*/
- OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- return make_binary(pb);
+ bptr = erts_bin_nrml_alloc(len);
+ if (buf != NULL) {
+ sys_memcpy(bptr->orig_bytes, buf, len);
+ }
+
+ hp = erts_produce_heap(hfact, PROC_BIN_SIZE, reserve_size);
+
+ return build_proc_bin(hfact->off_heap, hp, bptr);
}
+
+
/*
* When heap binary is not desired...
*/
Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
{
- ProcBin* pb;
Binary* bptr;
/*
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
- /*
- * Now allocate the ProcBin on the heap.
- */
- pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = len;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
-
- /*
- * Miscellanous updates. Return the tagged binary.
- */
- OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- return make_binary(pb);
+ return build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), bptr);
}
/*
@@ -355,9 +371,10 @@ typedef struct {
Uint bitoffs;
} ErtsB2LState;
-static void b2l_state_destructor(Binary *mbp)
+static int b2l_state_destructor(Binary *mbp)
{
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
+ return 1;
}
static BIF_RETTYPE
@@ -445,18 +462,17 @@ binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes,
sp->size = size;
sp->bitoffs = bitoffs;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
return binary_to_list_chunk(c_p, mb, sp, reds_left, 0);
}
}
static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor);
-
ASSERT(BIF_P->flags & F_DISABLE_GC);
return binary_to_list_chunk(BIF_P,
@@ -729,12 +745,13 @@ list_to_binary_engine(ErtsL2BState *sp)
}
}
-static void
+static int
l2b_state_destructor(Binary *mbp)
{
ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack);
+ return 1;
}
static ERTS_INLINE Eterm
@@ -792,8 +809,8 @@ list_to_binary_chunk(Eterm mb_eterm,
ERTS_L2B_STATE_MOVE(new_sp, sp);
sp = new_sp;
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ mb_eterm = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ASSERT(is_value(mb_eterm));
@@ -839,9 +856,9 @@ list_to_binary_chunk(Eterm mb_eterm,
static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1)
{
- Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
+ Binary *mbp = erts_magic_ref2bin(BIF_ARG_1);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor);
ASSERT(BIF_P->flags & F_DISABLE_GC);
return list_to_binary_chunk(BIF_ARG_1,
@@ -970,7 +987,10 @@ HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
- BIF_RET(BIF_ARG_1);
+ if (binary_bitsize(BIF_ARG_1) == 0) {
+ BIF_RET(BIF_ARG_1);
+ }
+ BIF_ERROR(BIF_P, BADARG);
}
return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 3c19e82b66..9ff52c92b8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,31 +36,32 @@
#include "hash.h"
#include "atom.h"
#include "beam_load.h"
-#include "erl_instrument.h"
#include "erl_hl_timer.h"
#include "erl_thr_progress.h"
+#include "erl_proc_sig_queue.h"
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
void do_break(void);
void erl_crash_dump_v(char *file, int line, char* fmt, va_list args);
-void erl_crash_dump(char* file, int line, char* fmt, ...);
#ifdef DEBUG
static void bin_check(void);
#endif
-static void print_garb_info(int to, void *to_arg, Process* p);
+static void print_garb_info(fmtfn_t to, void *to_arg, Process* p);
#ifdef OPPROF
static void dump_frequencies(void);
#endif
-static void dump_attributes(int to, void *to_arg, byte* ptr, int size);
+static void dump_attributes(fmtfn_t to, void *to_arg, byte* ptr, int size);
extern char* erts_system_version[];
+#define WRITE_BUFFER_SIZE (64*1024)
+
static void
-port_info(int to, void *to_arg)
+port_info(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_port);
for (i = 0; i < max; i++) {
@@ -71,7 +72,7 @@ port_info(int to, void *to_arg)
}
void
-process_info(int to, void *to_arg)
+process_info(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
@@ -106,36 +107,11 @@ process_killer(void)
if ((j = sys_get_key(0)) <= 0)
erts_exit(0, "");
switch(j) {
- case 'k': {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- erts_aint32_t state;
- erts_proc_inc_refc(rp);
- erts_smp_proc_lock(rp, rp_locks);
- state = erts_smp_atomic32_read_acqb(&rp->state);
- if (state & (ERTS_PSFLG_FREE
- | ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- erts_printf("Can only kill WAITING processes this way\n");
- }
- else {
- (void) erts_send_exit_signal(NULL,
- NIL,
- rp,
- &rp_locks,
- am_kill,
- NIL,
- NULL,
- 0);
- }
- erts_smp_proc_unlock(rp, rp_locks);
- erts_proc_dec_refc(rp);
- }
+ case 'k':
+ /* Send a 'kill' exit signal from init process */
+ erts_proc_sig_send_exit(NULL, erts_init_process_id,
+ rp->common.id, am_kill, NIL,
+ 0);
case 'n': br = 1; break;
case 'r': return;
default: return;
@@ -148,60 +124,86 @@ process_killer(void)
typedef struct {
int is_first;
- int to;
+ fmtfn_t to;
void *to_arg;
} PrintMonitorContext;
static void doit_print_link(ErtsLink *lnk, void *vpcontext)
{
PrintMonitorContext *pcontext = vpcontext;
- int to = pcontext->to;
+ fmtfn_t to = pcontext->to;
void *to_arg = pcontext->to_arg;
if (pcontext->is_first) {
pcontext->is_first = 0;
- erts_print(to, to_arg, "%T", lnk->pid);
+ erts_print(to, to_arg, "%T", lnk->other.item);
} else {
- erts_print(to, to_arg, ", %T", lnk->pid);
+ erts_print(to, to_arg, ", %T", lnk->other.item);
}
}
static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
{
+ ErtsMonitorData *mdp;
PrintMonitorContext *pcontext = vpcontext;
- int to = pcontext->to;
+ fmtfn_t to = pcontext->to;
void *to_arg = pcontext->to_arg;
char *prefix = ", ";
- if (pcontext->is_first) {
- pcontext->is_first = 0;
- prefix = "";
- }
-
- if (mon->type == MON_ORIGIN) {
- if (is_atom(mon->pid)) { /* dist by name */
- ASSERT(is_node_name_atom(mon->pid));
- erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
- mon->pid, mon->ref);
- } else if (is_atom(mon->name)){ /* local by name */
- erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
- erts_this_dist_entry->sysname, mon->ref);
- } else { /* local and distributed by pid */
- erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->pid, mon->ref);
- }
- } else { /* MON_TARGET */
- erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->pid, mon->ref);
+ mdp = erts_monitor_to_data(mon);
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_RESOURCE:
+ case ERTS_MON_TYPE_NODE:
+
+ if (pcontext->is_first) {
+ pcontext->is_first = 0;
+ prefix = "";
+ }
+
+ if (erts_monitor_is_target(mon)) {
+ if (mon->type != ERTS_MON_TYPE_RESOURCE)
+ erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->other.item, mdp->ref);
+ else {
+ ErtsResource* rsrc = mon->other.ptr;
+ erts_print(to, to_arg, "%s{from,{%T,%T},%T}", prefix, rsrc->type->module,
+ rsrc->type->name, mdp->ref);
+ }
+ }
+ else {
+ if (!(mon->flags & ERTS_ML_FLG_NAME))
+ erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->other.item, mdp->ref);
+ else {
+ ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
+ Eterm node;
+ if (mdep->dist)
+ node = mdep->dist->nodename;
+ else
+ node = erts_this_dist_entry->sysname;
+ erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mdep->u.name,
+ node, mdp->ref);
+ }
+ }
+
+ break;
+
+ default:
+ /* ignore other monitors... */
+ break;
}
}
/* Display info about an individual Erlang process */
void
-print_process_info(int to, void *to_arg, Process *p)
+print_process_info(fmtfn_t to, void *to_arg, Process *p)
{
- time_t approx_started;
int garbing = 0;
int running = 0;
+ Sint len;
struct saved_calls *scb;
erts_aint32_t state;
@@ -211,7 +213,7 @@ print_process_info(int to, void *to_arg, Process *p)
/* Display the state */
erts_print(to, to_arg, "State: ");
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_acqb(&p->state);
erts_dump_process_state(to, to_arg, state);
if (state & ERTS_PSFLG_GC) {
garbing = 1;
@@ -231,9 +233,9 @@ print_process_info(int to, void *to_arg, Process *p)
* Display the initial function name
*/
erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
- p->u.initial[INITIAL_MOD],
- p->u.initial[INITIAL_FUN],
- p->u.initial[INITIAL_ARI]);
+ p->u.initial.module,
+ p->u.initial.function,
+ p->u.initial.arity);
if (p->current != NULL) {
if (running) {
@@ -242,23 +244,28 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "Current call: ");
}
erts_print(to, to_arg, "%T:%T/%bpu\n",
- p->current[0],
- p->current[1],
- p->current[2]);
+ p->current->module,
+ p->current->function,
+ p->current->arity);
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
- approx_started = (time_t) p->approx_started;
- erts_print(to, to_arg, "Started: %s", ctime(&approx_started));
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
- erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len);
+
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+ len = erts_proc_sig_fetch(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ erts_print(to, to_arg, "Message queue length: %d\n", len);
/* display the message queue only if there is anything in it */
- if (!ERTS_IS_CRASH_DUMPING && p->msg.first != NULL && !garbing) {
- ErtsMessage* mp;
+ if (!ERTS_IS_CRASH_DUMPING && p->sig_qs.first != NULL && !garbing) {
erts_print(to, to_arg, "Message queue: [");
- for (mp = p->msg.first; mp; mp = mp->next)
- erts_print(to, to_arg, mp->next ? "%T," : "%T", ERL_MESSAGE_TERM(mp));
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
+ erts_print(to, to_arg, mp->next ? "%T," : "%T",
+ ERL_MESSAGE_TERM(mp));
+ });
erts_print(to, to_arg, "]\n");
}
@@ -291,19 +298,20 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "timeout");
else
erts_print(to, to_arg, "%T:%T/%bpu\n",
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- scb->ct[j]->code[2]);
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ scb->ct[j]->info.mfa.arity);
}
erts_print(to, to_arg, "\n");
}
/* display the links only if there are any*/
- if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p)) {
- PrintMonitorContext context = {1,to};
+ if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p) || ERTS_P_LT_MONITORS(p)) {
+ PrintMonitorContext context = {1, to, to_arg};
erts_print(to, to_arg,"Link list: [");
- erts_doforall_links(ERTS_P_LINKS(p), &doit_print_link, &context);
- erts_doforall_monitors(ERTS_P_MONITORS(p), &doit_print_monitor, &context);
+ erts_link_tree_foreach(ERTS_P_LINKS(p), doit_print_link, &context);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(p), doit_print_monitor, &context);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p), doit_print_monitor, &context);
erts_print(to, to_arg,"]\n");
}
@@ -326,6 +334,12 @@ print_process_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
erts_print(to, to_arg, "OldHeap unused: %bpu\n",
(OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
+ erts_print(to, to_arg, "BinVHeap: %b64u\n", p->off_heap.overhead);
+ erts_print(to, to_arg, "OldBinVHeap: %b64u\n", BIN_OLD_VHEAP(p));
+ erts_print(to, to_arg, "BinVHeap unused: %b64u\n",
+ BIN_VHEAP_SZ(p) - p->off_heap.overhead);
+ erts_print(to, to_arg, "OldBinVHeap unused: %b64u\n",
+ BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p));
erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p, !0));
if (garbing) {
@@ -336,9 +350,7 @@ print_process_info(int to, void *to_arg, Process *p)
erts_program_counter_info(to, to_arg, p);
} else {
erts_print(to, to_arg, "Stack dump:\n");
-#ifdef ERTS_SMP
if (!garbing)
-#endif
erts_stack_dump(to, to_arg, p);
}
@@ -348,13 +360,11 @@ print_process_info(int to, void *to_arg, Process *p)
}
static void
-print_garb_info(int to, void *to_arg, Process* p)
+print_garb_info(fmtfn_t to, void *to_arg, Process* p)
{
-#ifdef ERTS_SMP
- /* ERTS_SMP: A scheduler is probably concurrently doing gc... */
+ /* A scheduler is probably concurrently doing gc... */
if (!ERTS_IS_CRASH_DUMPING)
return;
-#endif
erts_print(to, to_arg, "New heap start: %bpX\n", p->heap);
erts_print(to, to_arg, "New heap top: %bpX\n", p->htop);
erts_print(to, to_arg, "Stack top: %bpX\n", p->stop);
@@ -365,7 +375,7 @@ print_garb_info(int to, void *to_arg, Process* p)
}
void
-info(int to, void *to_arg)
+info(fmtfn_t to, void *to_arg)
{
erts_memory(&to, to_arg, NULL, THE_NON_VALUE);
atom_info(to, to_arg);
@@ -380,8 +390,20 @@ info(int to, void *to_arg)
}
+static int code_size(struct erl_module_instance* modi)
+{
+ int size = modi->code_length;
+
+ if (modi->code_hdr) {
+ ErtsLiteralArea* lit = modi->code_hdr->literal_area;
+ if (lit)
+ size += (lit->end - lit->start) * sizeof(Eterm);
+ }
+ return size;
+}
+
void
-loaded(int to, void *to_arg)
+loaded(fmtfn_t to, void *to_arg)
{
int i;
int old = 0;
@@ -397,13 +419,9 @@ loaded(int to, void *to_arg)
* Calculate and print totals.
*/
for (i = 0; i < module_code_size(code_ix); i++) {
- if ((modp = module_code(i, code_ix)) != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
- cur += modp->curr.code_length;
- if (modp->old.code_length != 0) {
- old += modp->old.code_length;
- }
+ if ((modp = module_code(i, code_ix)) != NULL) {
+ cur += code_size(&modp->curr);
+ old += code_size(&modp->old);
}
}
erts_print(to, to_arg, "Current code: %d\n", cur);
@@ -419,31 +437,25 @@ loaded(int to, void *to_arg)
/*
* Interactive dump; keep it brief.
*/
- if (modp != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
- erts_print(to, to_arg, "%T", make_atom(modp->module));
- cur += modp->curr.code_length;
- erts_print(to, to_arg, " %d", modp->curr.code_length );
- if (modp->old.code_length != 0) {
- erts_print(to, to_arg, " (%d old)",
- modp->old.code_length );
- old += modp->old.code_length;
- }
+ if (modp != NULL && ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ erts_print(to, to_arg, "%T %d", make_atom(modp->module),
+ code_size(&modp->curr));
+ if (modp->old.code_length != 0)
+ erts_print(to, to_arg, " (%d old)", code_size(&modp->old));
erts_print(to, to_arg, "\n");
}
} else {
/*
* To crash dump; make it parseable.
*/
- if (modp != NULL &&
- ((modp->curr.code_length != 0) ||
- (modp->old.code_length != 0))) {
+ if (modp != NULL && ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
erts_print(to, to_arg, "=mod:");
erts_print(to, to_arg, "%T", make_atom(modp->module));
erts_print(to, to_arg, "\n");
erts_print(to, to_arg, "Current size: %d\n",
- modp->curr.code_length);
+ code_size(&modp->curr));
code = modp->curr.code_hdr;
if (code != NULL && code->attr_ptr) {
erts_print(to, to_arg, "Current attributes: ");
@@ -457,7 +469,7 @@ loaded(int to, void *to_arg)
}
if (modp->old.code_length != 0) {
- erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length);
+ erts_print(to, to_arg, "Old size: %d\n", code_size(&modp->old));
code = modp->old.code_hdr;
if (code->attr_ptr) {
erts_print(to, to_arg, "Old attributes: ");
@@ -478,11 +490,9 @@ loaded(int to, void *to_arg)
static void
-dump_attributes(int to, void *to_arg, byte* ptr, int size)
+dump_attributes(fmtfn_t to, void *to_arg, byte* ptr, int size)
{
- while (size-- > 0) {
- erts_print(to, to_arg, "%02X", *ptr++);
- }
+ erts_print_base64(to, to_arg, ptr, size);
erts_print(to, to_arg, "\n");
}
@@ -497,11 +507,13 @@ do_break(void)
/* check if we're in console mode and, if so,
halt immediately if break is called */
mode = erts_read_env("ERL_CONSOLE_MODE");
- if (mode && strcmp(mode, "window") != 0)
+ if (mode && sys_strcmp(mode, "window") != 0)
erts_exit(0, "");
erts_free_read_env(mode);
#endif /* __WIN32__ */
+ ASSERT(erts_thr_progress_is_blocking());
+
erts_printf("\n"
"BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n"
" (v)ersion (k)ill (D)b-tables (d)istribution\n");
@@ -578,7 +590,7 @@ do_break(void)
#endif
#ifdef DEBUG
case 't':
- erts_p_slpq();
+ /* erts_p_slpq(); */
return;
case 'b':
bin_check();
@@ -648,7 +660,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_smp_atomic_read_nob(&bp->val->refc));
+ erts_refc_read(&bp->val->intern.refc, 1));
}
}
if (printed) {
@@ -662,13 +674,41 @@ bin_check(void)
#endif
+static Sint64 crash_dump_limit = ERTS_SINT64_MAX;
+static Sint64 crash_dump_written = 0;
+
+typedef struct LimitedWriterInfo_ {
+ fmtfn_t to;
+ void* to_arg;
+} LimitedWriterInfo;
+
+static int
+crash_dump_limited_writer(void* vfdp, char* buf, size_t len)
+{
+ const char stop_msg[] = "\n=abort:CRASH DUMP SIZE LIMIT REACHED\n";
+ LimitedWriterInfo* lwi = (LimitedWriterInfo *) vfdp;
+
+ crash_dump_written += len;
+ if (crash_dump_written <= crash_dump_limit) {
+ return lwi->to(lwi->to_arg, buf, len);
+ }
+
+ len -= (crash_dump_written - crash_dump_limit);
+ lwi->to(lwi->to_arg, buf, len);
+ lwi->to(lwi->to_arg, (char*)stop_msg, sizeof(stop_msg)-1);
+ if (lwi->to == &erts_write_fp) {
+ fclose((FILE *) lwi->to_arg);
+ }
+
+ /* We assume that crash dump was called from erts_exit_vv() */
+ erts_exit_epilogue();
+}
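+
+/* A worked example of the trimming above: with crash_dump_limit == 100,
+ * 90 bytes already written and an incoming chunk of 25 bytes,
+ * crash_dump_written becomes 115, so len is reduced by 15 to 10; the
+ * first 10 bytes are written, followed by the abort marker, and the
+ * emulator then exits through erts_exit_epilogue(). */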
+
/* XXX THIS SHOULD BE IN SYSTEM !!!! */
void
erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
-#ifdef ERTS_SMP
ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
-#endif
int fd;
size_t envsz;
time_t now;
@@ -679,11 +719,15 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
int secs;
int env_erl_crash_dump_seconds_set = 1;
int i;
+ fmtfn_t to = &erts_write_fd;
+ void* to_arg;
+ FILE* fp = 0;
+ LimitedWriterInfo lwi;
+ static char* write_buffer; /* 'static' to avoid a leak warning in valgrind */
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
-#ifdef ERTS_SMP
/* Order all managed threads to block, this has to be done
first to guarantee that this is the only thread to generate
crash dump. */
@@ -696,6 +740,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
* We have to be very very careful when doing this as the schedulers
* could be anywhere.
*/
+ sys_init_suspend_handler();
+
for (i = 0; i < erts_no_schedulers; i++) {
erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
if (!erts_equal_tids(tid,erts_thr_self()))
@@ -705,12 +751,9 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
#endif
/* Allow us to pass certain places without locking... */
- erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
- erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
+ erts_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
+ erts_tsd_set(erts_is_crash_dumping_key, (void *) 1);
-#else /* !ERTS_SMP */
- erts_writing_erl_crash_dump = 1;
-#endif /* ERTS_SMP */
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
@@ -736,16 +779,16 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
* - write dump until alarm or file is written completely
*/
- if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) {
- env_erl_crash_dump_seconds_set = 0;
- secs = -1;
+ if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP_SECONDS", env, &envsz) == 1) {
+ env_erl_crash_dump_seconds_set = 1;
+ secs = atoi(env);
} else {
- env_erl_crash_dump_seconds_set = 1;
- secs = atoi(env);
+ env_erl_crash_dump_seconds_set = 0;
+ secs = -1;
}
if (secs == 0) {
- return;
+ return;
}
/* erts_sys_prepare_crash_dump returns 1 if heart port is found, otherwise 0
@@ -759,7 +802,22 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
return;
}
- if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
+ crash_dump_limit = ERTS_SINT64_MAX;
+ envsz = sizeof(env);
+ if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP_BYTES", env, &envsz) == 1) {
+ Sint64 limit;
+ char* endptr;
+ errno = 0;
+ limit = ErtsStrToSint64(env, &endptr, 10);
+ if (errno == 0 && limit >= 0 && endptr != env && *endptr == 0) {
+ if (limit == 0)
+ return;
+ crash_dump_limit = limit;
+ to = &crash_dump_limited_writer;
+ }
+ }
+
+ if (erts_sys_explicit_8bit_getenv("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 1)
dumpname = "erl_crash.dump";
else
dumpname = &dumpnamebuf[0];
@@ -769,40 +827,58 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
fd = open(dumpname,O_WRONLY | O_CREAT | O_TRUNC,0640);
if (fd < 0)
return; /* Can't create the crash dump, skip it */
+
+ /*
+ * Wrap into a FILE* so that we can use buffered output. Set an
+ * explicit buffer to make sure the first write does not fail because
+ * of a failure to allocate a buffer.
+ */
+ write_buffer = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP, WRITE_BUFFER_SIZE);
+ if (write_buffer && (fp = fdopen(fd, "w")) != NULL) {
+ setvbuf(fp, write_buffer, _IOFBF, WRITE_BUFFER_SIZE);
+ lwi.to = &erts_write_fp;
+ lwi.to_arg = (void*)fp;
+ } else {
+ lwi.to = &erts_write_fd;
+ lwi.to_arg = (void*)&fd;
+ }
+ if (to == &crash_dump_limited_writer) {
+ to_arg = (void *) &lwi;
+ } else {
+ to = lwi.to;
+ to_arg = lwi.to_arg;
+ }
+
time(&now);
- erts_fdprintf(fd, "=erl_crash_dump:0.3\n%s", ctime(&now));
+ erts_cbprintf(to, to_arg, "=erl_crash_dump:0.5\n%s", ctime(&now));
if (file != NULL)
- erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line);
+ erts_cbprintf(to, to_arg, "The error occurred in file %s, line %d\n", file, line);
if (fmt != NULL && *fmt != '\0') {
- erts_fdprintf(fd, "Slogan: ");
- erts_vfdprintf(fd, fmt, args);
+ erts_cbprintf(to, to_arg, "Slogan: ");
+ erts_vcbprintf(to, to_arg, fmt, args);
}
- erts_fdprintf(fd, "System version: ");
- erts_print_system_version(fd, NULL, NULL);
+ erts_cbprintf(to, to_arg, "System version: ");
+ erts_print_system_version(to, to_arg, NULL);
#if ERTS_SAVED_COMPILE_TIME
- erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
+ erts_cbprintf(to, to_arg, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
#endif
- erts_fdprintf(fd, "Taints: ");
- erts_print_nif_taints(fd, NULL);
- erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
+ erts_cbprintf(to, to_arg, "Taints: ");
+ erts_print_nif_taints(to, to_arg);
+ erts_cbprintf(to, to_arg, "Atoms: %d\n", atom_table_size());
-#ifdef USE_THREADS
/* We want to note which thread it was that called erts_exit */
if (erts_get_scheduler_data()) {
- erts_fdprintf(fd, "Calling Thread: scheduler:%d\n",
+ erts_cbprintf(to, to_arg, "Calling Thread: scheduler:%d\n",
erts_get_scheduler_data()->no);
} else {
if (!erts_thr_getname(erts_thr_self(), dumpnamebuf, MAXPATHLEN))
- erts_fdprintf(fd, "Calling Thread: %s\n", dumpnamebuf);
+ erts_cbprintf(to, to_arg, "Calling Thread: %s\n", dumpnamebuf);
else
- erts_fdprintf(fd, "Calling Thread: %p\n", erts_thr_self());
+ erts_cbprintf(to, to_arg, "Calling Thread: %p\n", erts_thr_self());
}
-#else
- erts_fdprintf(fd, "Calling Thread: scheduler:1\n");
-#endif
#if defined(ERTS_HAVE_TRY_CATCH)
@@ -816,12 +892,26 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
*/
for (i = 0; i < erts_no_schedulers; i++) {
ERTS_SYS_TRY_CATCH(
- erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i)),
- erts_fdprintf(fd, "** crashed **\n"));
+ erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
+ }
+ for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_CPU_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
+ }
+ erts_cbprintf(to, to_arg, "=dirty_cpu_run_queue\n");
+ erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_CPU_RUNQ);
+
+ for (i = 0; i < erts_no_dirty_io_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_IO_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
}
+ erts_cbprintf(to, to_arg, "=dirty_io_run_queue\n");
+ erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_IO_RUNQ);
#endif
-#ifdef ERTS_SMP
#ifdef ERTS_SYS_SUSPEND_SIGNAL
@@ -843,53 +933,57 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
*/
erts_thr_progress_fatal_error_wait(60000);
/* Either worked or not... */
-#endif
#ifndef ERTS_HAVE_TRY_CATCH
/* This is safe to call here, as all schedulers are blocked */
for (i = 0; i < erts_no_schedulers; i++) {
- erts_print_scheduler_info(fd, NULL, ERTS_SCHEDULER_IX(i));
+ erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i));
}
#endif
- info(fd, NULL); /* General system info */
+ info(to, to_arg); /* General system info */
if (erts_ptab_initialized(&erts_proc))
- process_info(fd, NULL); /* Info about each process and port */
- db_info(fd, NULL, 0);
- erts_print_bif_timer_info(fd, NULL);
- distribution_info(fd, NULL);
- erts_fdprintf(fd, "=loaded_modules\n");
- loaded(fd, NULL);
- erts_dump_fun_entries(fd, NULL);
- erts_deep_process_dump(fd, NULL);
- erts_fdprintf(fd, "=atoms\n");
- dump_atoms(fd, NULL);
-
- /* Keep the instrumentation data at the end of the dump */
- if (erts_instr_memory_map || erts_instr_stat) {
- erts_fdprintf(fd, "=instr_data\n");
-
- if (erts_instr_stat) {
- erts_fdprintf(fd, "=memory_status\n");
- erts_instr_dump_stat_to_fd(fd, 0);
- }
- if (erts_instr_memory_map) {
- erts_fdprintf(fd, "=memory_map\n");
- erts_instr_dump_memory_map_to_fd(fd);
- }
+ process_info(to, to_arg); /* Info about each process and port */
+ db_info(to, to_arg, 0);
+ erts_print_bif_timer_info(to, to_arg);
+ distribution_info(to, to_arg);
+ erts_cbprintf(to, to_arg, "=loaded_modules\n");
+ loaded(to, to_arg);
+ erts_dump_fun_entries(to, to_arg);
+ erts_deep_process_dump(to, to_arg);
+ erts_cbprintf(to, to_arg, "=atoms\n");
+ dump_atoms(to, to_arg);
+
+ erts_cbprintf(to, to_arg, "=end\n");
+ if (fp) {
+ fclose(fp);
}
-
- erts_fdprintf(fd, "=end\n");
close(fd);
erts_fprintf(stderr,"done\n");
}
void
-erl_crash_dump(char* file, int line, char* fmt, ...)
+erts_print_base64(fmtfn_t to, void *to_arg, byte* src, Uint size)
{
- va_list args;
-
- va_start(args, fmt);
- erl_crash_dump_v(file, line, fmt, args);
- va_end(args);
+ static const byte base64_chars[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+ while (size >= 3) {
+ erts_putc(to, to_arg, base64_chars[src[0] >> 2]);
+ erts_putc(to, to_arg, base64_chars[((src[0] & 0x03) << 4) | (src[1] >> 4)]);
+ erts_putc(to, to_arg, base64_chars[((src[1] & 0x0f) << 2) | (src[2] >> 6)]);
+ erts_putc(to, to_arg, base64_chars[src[2] & 0x3f]);
+ size -= 3;
+ src += 3;
+ }
+ if (size == 1) {
+ erts_putc(to, to_arg, base64_chars[src[0] >> 2]);
+ erts_putc(to, to_arg, base64_chars[(src[0] & 0x03) << 4]);
+ erts_print(to, to_arg, "==");
+ } else if (size == 2) {
+ erts_putc(to, to_arg, base64_chars[src[0] >> 2]);
+ erts_putc(to, to_arg, base64_chars[((src[0] & 0x03) << 4) | (src[1] >> 4)]);
+ erts_putc(to, to_arg, base64_chars[(src[1] & 0x0f) << 2]);
+ erts_putc(to, to_arg, '=');
+ }
}
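+
+/* Worked example: the three input bytes "Cat" (0x43 0x61 0x74) are
+ * regrouped into the 6-bit values 010000|110110|000101|110100, i.e.
+ * 16, 54, 5 and 52, which index base64_chars as "Q2F0". A trailing
+ * group of one or two bytes is padded with "==" or "=" respectively,
+ * as in the tail cases above. */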
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
new file mode 100644
index 0000000000..61eb02a7a2
--- /dev/null
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -0,0 +1,1038 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017-2018. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+%if ARCH_64
+BS_SAFE_MUL(A, B, Fail, Dst) {
+ Uint64 res = ($A) * ($B);
+ if (res / $B != $A) {
+ $Fail;
+ }
+ $Dst = res;
+}
+%else
+BS_SAFE_MUL(A, B, Fail, Dst) {
+ Uint64 res = (Uint64)($A) * (Uint64)($B);
+ if ((res >> (8*sizeof(Uint))) != 0) {
+ $Fail;
+ }
+ $Dst = res;
+}
+%endif
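+
+// The 64-bit variant detects overflow by dividing the truncated product
+// back out: for a non-zero B, A * B overflowed exactly when res / B is
+// no longer A. (B is a segment unit here, which in practice is at least
+// 1.) For illustration, in 8-bit arithmetic 32 * 16 wraps to 0, and
+// 0 / 16 == 0 != 32, so the check fails as intended. The 32-bit variant
+// instead forms the full product in 64 bits and rejects any result
+// whose high word is non-zero.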
+
+BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ Sint signed_size;
+ Uint uint_size;
+ Uint temp_bits;
+
+ if (is_small($Bits)) {
+ signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ } else {
+ if (!term_to_Uint($Bits, &temp_bits)) {
+ $Fail;
+ }
+ uint_size = temp_bits;
+ }
+ $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst);
+}
+
+BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) {
+ Sint signed_size;
+ Uint uint_size;
+ Uint temp_bits;
+
+ if (is_small($Bits)) {
+ signed_size = signed_val($Bits);
+ if (signed_size < 0) {
+ $Fail;
+ }
+ uint_size = (Uint) signed_size;
+ } else {
+ if (!term_to_Uint($Bits, &temp_bits)) {
+ $Fail;
+ }
+ uint_size = temp_bits;
+ }
+ $Dst = uint_size * $Unit;
+}
+
+TEST_BIN_VHEAP(VNh, Nh, Live) {
+ Uint need = $Nh;
+ if (E - HTOP < need || MSO(c_p).overhead + $VNh >= BIN_VHEAP_SZ(c_p)) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED(need);
+}
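+
+// In words: trigger a collection either when fewer than $Nh words are
+// left on the ordinary heap, or when adding $VNh words of off-heap
+// binary overhead would push the process past its virtual binary heap
+// size; the latter keeps reference-counted binaries from accumulating
+// without bound between collections.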
+
+i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+
+ $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
+ _mb = ms_matchbuffer($Ms);
+ if (((_mb->size - _mb->offset) % $Unit) == 0) {
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_all_2(c_p, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ ASSERT(is_value(_result));
+ $Dst = _result;
+ } else {
+ HEAP_SPACE_VERIFIED(0);
+ $FAIL($Fail);
+ }
+}
+
+i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ Uint _size;
+ $BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size);
+ $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ Sint _size;
+
+ if (!is_small($Sz) || (_size = unsigned_val($Sz)) > 64) {
+ $FAIL($Fail);
+ }
+ _size *= (($Flags) >> 3);
+ $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live);
+ _mb = ms_matchbuffer($Ms);
+ LIGHT_SWAPOUT;
+ _result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(_result)) {
+ $FAIL($Fail);
+ } else {
+ $Dst = _result;
+ }
+}
+
+i_bs_skip_bits2(Fail, Ms, Bits, Unit) {
+ ErlBinMatchBuffer *_mb;
+ size_t new_offset;
+ Uint _size;
+
+ _mb = ms_matchbuffer($Ms);
+ $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size);
+ new_offset = _mb->offset + _size;
+ if (new_offset <= _mb->size) {
+ _mb->offset = new_offset;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_bs_skip_bits_all2(Fail, Ms, Unit) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ms);
+ if (((_mb->size - _mb->offset) % $Unit) == 0) {
+ _mb->offset = _mb->size;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_bs_skip_bits_imm2(Fail, Ms, Bits) {
+ ErlBinMatchBuffer *_mb;
+ size_t new_offset;
+ _mb = ms_matchbuffer($Ms);
+ new_offset = _mb->offset + ($Bits);
+ if (new_offset <= _mb->size) {
+ _mb->offset = new_offset;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_new_bs_put_binary(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_binary_all(Fail, Src, Unit) {
+ if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_binary_imm(Fail, Sz, Src) {
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), ($Sz)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_float(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_float_imm(Fail, Sz, Flags, Src) {
+ if (!erts_new_bs_put_float(c_p, ($Src), ($Sz), ($Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer(Fail, Sz, Flags, Src) {
+ Sint _size;
+ $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size);
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) {
+ $BADARG($Fail);
+ }
+}
+
+i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) {
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) {
+ $BADARG($Fail);
+ }
+}
+
+#
+# i_bs_init*
+#
+
+i_bs_init_fail_heap := bs_init.fail_heap.verify.execute;
+i_bs_init_fail := bs_init.fail.verify.execute;
+i_bs_init := bs_init.plain.execute;
+i_bs_init_heap := bs_init.heap.execute;
+
+bs_init.head() {
+ Eterm BsOp1;
+ Eterm BsOp2;
+}
+
+bs_init.fail_heap(Size, HeapAlloc) {
+ BsOp1 = $Size;
+ BsOp2 = $HeapAlloc;
+}
+
+bs_init.fail(Size) {
+ BsOp1 = $Size;
+ BsOp2 = 0;
+}
+
+bs_init.plain(Size) {
+ BsOp1 = $Size;
+ BsOp2 = 0;
+}
+
+bs_init.heap(Size, HeapAlloc) {
+ BsOp1 = $Size;
+ BsOp2 = $HeapAlloc;
+}
+
+bs_init.verify(Fail) {
+ if (is_small(BsOp1)) {
+ Sint size = signed_val(BsOp1);
+ if (size < 0) {
+ $BADARG($Fail);
+ }
+ BsOp1 = (Eterm) size;
+ } else {
+ Uint bytes;
+
+ if (!term_to_Uint(BsOp1, &bytes)) {
+ c_p->freason = bytes;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
+ $SYSTEM_LIMIT($Fail);
+ }
+ BsOp1 = (Eterm) bytes;
+ }
+}
+
+bs_init.execute(Live, Dst) {
+ if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+ Uint bin_need;
+
+ bin_need = heap_bin_size(BsOp1);
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ $GC_TEST(0, bin_need+BsOp2+ERL_SUB_BIN_SIZE, $Live);
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += bin_need;
+ hb->thing_word = header_heap_bin(BsOp1);
+ hb->size = BsOp1;
+ erts_current_bin = (byte *) hb->data;
+ $Dst = make_binary(hb);
+ } else {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ $TEST_BIN_VHEAP(BsOp1 / sizeof(Eterm),
+ BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, $Live);
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(BsOp1);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = BsOp1;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm));
+
+ $Dst = make_binary(pb);
+ }
+}
+
+#
+# i_bs_init_bits*
+#
+
+i_bs_init_bits := bs_init_bits.plain.execute;
+i_bs_init_bits_heap := bs_init_bits.heap.execute;
+i_bs_init_bits_fail := bs_init_bits.fail.verify.execute;
+i_bs_init_bits_fail_heap := bs_init_bits.fail_heap.verify.execute;
+
+bs_init_bits.head() {
+ Eterm num_bits_term;
+ Uint num_bits;
+ Uint alloc;
+}
+
+bs_init_bits.plain(NumBits) {
+ num_bits = $NumBits;
+ alloc = 0;
+}
+
+bs_init_bits.heap(NumBits, Alloc) {
+ num_bits = $NumBits;
+ alloc = $Alloc;
+}
+
+bs_init_bits.fail(NumBitsTerm) {
+ num_bits_term = $NumBitsTerm;
+ alloc = 0;
+}
+
+bs_init_bits.fail_heap(NumBitsTerm, Alloc) {
+ num_bits_term = $NumBitsTerm;
+ alloc = $Alloc;
+}
+
+bs_init_bits.verify(Fail) {
+ if (is_small(num_bits_term)) {
+ Sint size = signed_val(num_bits_term);
+ if (size < 0) {
+ $BADARG($Fail);
+ }
+ num_bits = (Uint) size;
+ } else {
+ Uint bits;
+
+ if (!term_to_Uint(num_bits_term, &bits)) {
+ c_p->freason = bits;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ num_bits = (Uint) bits;
+ }
+}
+
+bs_init_bits.execute(Live, Dst) {
+ Eterm new_binary;
+ Uint num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
+
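+ /* A bit count that is not a whole number of bytes is delivered as
+ * a sub binary into the byte-aligned binary built below, so heap
+ * space for the ErlSubBin wrapper is reserved as well. */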
+ if (num_bits & 7) {
+ alloc += ERL_SUB_BIN_SIZE;
+ }
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ alloc += heap_bin_size(num_bytes);
+ } else {
+ alloc += PROC_BIN_SIZE;
+ }
+ $test_heap(alloc, $Live);
+
+ /* num_bits = Number of bits to build
+ * num_bytes = Number of bytes to allocate in the binary
+ * alloc = Total number of words to allocate on the heap
+ */
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += heap_bin_size(num_bytes);
+ hb->thing_word = header_heap_bin(num_bytes);
+ hb->size = num_bytes;
+ erts_current_bin = (byte *) hb->data;
+ new_binary = make_binary(hb);
+
+ do_bits_sub_bin:
+ if (num_bits & 7) {
+ ErlSubBin* sb;
+
+ sb = (ErlSubBin *) HTOP;
+ HTOP += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = num_bytes - 1;
+ sb->bitsize = num_bits & 7;
+ sb->offs = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 0;
+ sb->orig = new_binary;
+ new_binary = make_binary(sb);
+ }
+ HEAP_SPACE_VERIFIED(0);
+ $Dst = new_binary;
+ } else {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(num_bytes);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = num_bytes;
+ pb->next = MSO(c_p).first;
+ MSO(c_p).first = (struct erl_off_heap_header*) pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm));
+ new_binary = make_binary(pb);
+ goto do_bits_sub_bin;
+ }
+}
+
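+// Calculate the total size in bits of a binary under construction:
+// $Dst = $Src1 + $Src2 * $Unit. Non-numeric or negative operands fail
+// with badarg; a result that does not fit in a Uint fails with
+// system_limit.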
+bs_add(Fail, Src1, Src2, Unit, Dst) {
+ Eterm Op1 = $Src1;
+ Eterm Op2 = $Src2;
+ Uint unit = $Unit;
+
+ if (is_both_small(Op1, Op2)) {
+ Sint Arg1 = signed_val(Op1);
+ Sint Arg2 = signed_val(Op2);
+
+ if (Arg1 >= 0 && Arg2 >= 0) {
+ $BS_SAFE_MUL(Arg2, unit, $SYSTEM_LIMIT($Fail), Op1);
+ Op1 += Arg1;
+
+ store_bs_add_result:
+ if (Op1 <= MAX_SMALL) {
+ Op1 = make_small(Op1);
+ } else {
+ /*
+ * May generate a heap fragment, but in this
+ * particular case it is OK, since the value will be
+ * stored into an x register (the GC will scan x
+ * registers for references to heap fragments) and
+ * there is no risk that value can be stored into a
+ * location that is not scanned for heap-fragment
+ * references (such as the heap).
+ */
+ SWAPOUT;
+ Op1 = erts_make_integer(Op1, c_p);
+ HTOP = HEAP_TOP(c_p);
+ }
+ $Dst = Op1;
+ $NEXT0();
+ }
+ $BADARG($Fail);
+ } else {
+ Uint a;
+ Uint b;
+ Uint c;
+
+ /*
+ * Now we know that one of the arguments is
+ * not a small. We must convert both arguments
+ * to Uints and check for errors at the same time.
+ *
+ * Error checking is tricky.
+ *
+ * If one of the arguments is not numeric or
+ * not positive, the error reason is BADARG.
+ *
+ * Otherwise if both arguments are numeric,
+ * but at least one argument does not fit in
+ * an Uint, the reason is SYSTEM_LIMIT.
+ */
+
+ if (!term_to_Uint(Op1, &a)) {
+ if (a == BADARG) {
+ $BADARG($Fail);
+ }
+ if (!term_to_Uint(Op2, &b)) {
+ c_p->freason = b;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $SYSTEM_LIMIT($Fail);
+ } else if (!term_to_Uint(Op2, &b)) {
+ c_p->freason = b;
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+
+ /*
+ * The arguments are now correct and stored in a and b.
+ */
+
+ $BS_SAFE_MUL(b, unit, $SYSTEM_LIMIT($Fail), c);
+ Op1 = a + c;
+ if (Op1 < a) {
+ /*
+ * If the result is less than one of the
+ * arguments, there must have been an overflow.
+ */
+ $SYSTEM_LIMIT($Fail);
+ }
+ goto store_bs_add_result;
+ }
+ /* No fallthrough */
+ ASSERT(0);
+}
+
+bs_put_string(Len, Ptr) {
+ erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) $Ptr, $Len));
+}
+
+i_bs_append(Fail, ExtraHeap, Live, Unit, Size, Dst) {
+ Uint live = $Live;
+ Uint res;
+
+ HEAVY_SWAPOUT;
+ reg[live] = x(SCRATCH_X_REG);
+ res = erts_bs_append(c_p, reg, live, $Size, $ExtraHeap, $Unit);
+ HEAVY_SWAPIN;
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $Dst = res;
+}
+
+i_bs_private_append(Fail, Unit, Size, Src, Dst) {
+ Eterm res;
+
+ res = erts_bs_private_append(c_p, $Src, $Size, $Unit);
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+ $Dst = res;
+}
+
+bs_init_writable() {
+ HEAVY_SWAPOUT;
+ r(0) = erts_bs_init_writable(c_p, r(0));
+ HEAVY_SWAPIN;
+}
+
+i_bs_utf8_size(Src, Dst) {
+ Eterm arg = $Src;
+ Eterm result;
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-8. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (1-4). We
+ * can get away with that because we KNOW that bs_put_utf8 will do
+ * full error checking.
+ */
+
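+ /* Note that the comparisons below are done on tagged values; the
+ * tagged representation preserves the order of non-negative smalls,
+ * so untagging is unnecessary. */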
+ if (arg < make_small(0x80UL)) {
+ result = make_small(1);
+ } else if (arg < make_small(0x800UL)) {
+ result = make_small(2);
+ } else if (arg < make_small(0x10000UL)) {
+ result = make_small(3);
+ } else {
+ result = make_small(4);
+ }
+ $Dst = result;
+}
+
+i_bs_put_utf8(Fail, Src) {
+ if (!erts_bs_put_utf8(ERL_BITS_ARGS_1($Src))) {
+ $BADARG($Fail);
+ }
+}
+
+i_bs_utf16_size(Src, Dst) {
+ Eterm arg = $Src;
+ Eterm result = make_small(2);
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-16. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (2 or 4). We
+ * can get away with that because we KNOW that bs_put_utf16 will do
+ * full error checking.
+ */
+
+ if (arg >= make_small(0x10000UL)) {
+ result = make_small(4);
+ }
+ $Dst = result;
+}
+
+bs_put_utf16(Fail, Flags, Src) {
+ if (!erts_bs_put_utf16(ERL_BITS_ARGS_2($Src, $Flags))) {
+ $BADARG($Fail);
+ }
+}
+
+// Validate a value about to be stored in a binary.
+i_bs_validate_unicode(Fail, Src) {
+ Eterm val = $Src;
+
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (if the term is a bignum, it could
+ * slip through the test, and there is no further test that
+ * would catch it, since bit syntax construction silently masks
+ * too big numbers).
+ */
+ if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
+ $BADARG($Fail);
+ }
+}
+
+// Validate a value that has been matched out.
+i_bs_validate_unicode_retract(Fail, Src, Ms) {
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (a bignum pointer could fall in
+ * the valid range).
+ */
+
+ Eterm i = $Src;
+ if (is_not_small(i) || i > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) {
+ Eterm ms = $Ms; /* Match context */
+ ErlBinMatchBuffer* mb;
+
+ /* Invalid value. Retract the match position past the 32 bits
+ * that the utf32 match consumed. */
+ mb = ms_matchbuffer(ms);
+ mb->offset -= 32;
+ $BADARG($Fail);
+ }
+}
+
+
+//
+// Matching of binaries.
+//
+
+i_bs_start_match2 := bs_start_match.fetch.execute;
+
+bs_start_match.head() {
+ Eterm context;
+}
+
+bs_start_match.fetch(Src) {
+ context = $Src;
+}
+
+bs_start_match.execute(Fail, Live, Slots, Dst) {
+ Uint slots;
+ Uint live;
+ Eterm header;
+ if (!is_boxed(context)) {
+ $FAIL($Fail);
+ }
+ header = *boxed_val(context);
+ slots = $Slots;
+ live = $Live;
+ if (header_is_bin_matchstate(header)) {
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
+ Uint actual_slots = HEADER_NUM_SLOTS(header);
+ ms->save_offset[0] = ms->mb.offset;
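+ /* The source is already a match state. If it has too few save
+ * slots for this function, make an enlarged copy. */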
+ if (actual_slots < slots) {
+ ErlBinMatchState* dst;
+ Uint live = $Live;
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+
+ $GC_TEST_PRESERVE(wordsneeded, live, context);
+ ms = (ErlBinMatchState *) boxed_val(context);
+ dst = (ErlBinMatchState *) HTOP;
+ *dst = *ms;
+ *HTOP = HEADER_BIN_MATCHSTATE(slots);
+ HTOP += wordsneeded;
+ HEAP_SPACE_VERIFIED(0);
+ $Dst = make_matchstate(dst);
+ }
+ } else if (is_binary_header(header)) {
+ Eterm result;
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+ $GC_TEST_PRESERVE(wordsneeded, live, context);
+ HEAP_TOP(c_p) = HTOP;
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+ result = erts_bs_start_match_2(c_p, context, slots);
+ HTOP = HEAP_TOP(c_p);
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_zero_tail2(Fail, Ctx) {
+ ErlBinMatchBuffer *_mb;
+ _mb = (ErlBinMatchBuffer*) ms_matchbuffer($Ctx);
+ if (_mb->size != _mb->offset) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_tail_imm2(Fail, Ctx, Offset) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if (_mb->size - _mb->offset != $Offset) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_unit(Fail, Ctx, Unit) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if ((_mb->size - _mb->offset) % $Unit) {
+ $FAIL($Fail);
+ }
+}
+
+bs_test_unit8(Fail, Ctx) {
+ ErlBinMatchBuffer *_mb;
+ _mb = ms_matchbuffer($Ctx);
+ if ((_mb->size - _mb->offset) & 7) {
+ $FAIL($Fail);
+ }
+}
+
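+// Fast paths for fetching 8/16/32-bit unsigned big-endian integers
+// from a match context: a byte-aligned match position reads the bytes
+// directly, anything else falls back to a generic helper.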
+i_bs_get_integer_8(Ctx, Fail, Dst) {
+ Eterm _result;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 8) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
+ } else {
+ _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
+ _mb->offset += 8;
+ }
+ $Dst = _result;
+}
+
+i_bs_get_integer_16(Ctx, Fail, Dst) {
+ Eterm _result;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 16) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
+ } else {
+ _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
+ _mb->offset += 16;
+ }
+ $Dst = _result;
+}
+
+%if ARCH_64
+i_bs_get_integer_32(Ctx, Fail, Dst) {
+ Uint32 _integer;
+ ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx);
+
+ if (_mb->size - _mb->offset < 32) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _integer = erts_bs_get_unaligned_uint32(_mb);
+ } else {
+ _integer = get_int32(_mb->base + _mb->offset/8);
+ }
+ _mb->offset += 32;
+ $Dst = make_small(_integer);
+}
+%endif
+
+i_bs_get_integer_imm := bs_get_integer.fetch.execute;
+i_bs_get_integer_small_imm := bs_get_integer.fetch_small.execute;
+
+bs_get_integer.head() {
+ Eterm Ms, Sz;
+}
+
+bs_get_integer.fetch(Ctx, Size, Live) {
+ Uint wordsneeded;
+ Ms = $Ctx;
+ Sz = $Size;
+ wordsneeded = 1+WSIZE(NBYTES(Sz));
+ $GC_TEST_PRESERVE(wordsneeded, $Live, Ms);
+}
+
+bs_get_integer.fetch_small(Ctx, Size) {
+ Ms = $Ctx;
+ Sz = $Size;
+}
+
+bs_get_integer.execute(Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ mb = ms_matchbuffer(Ms);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, Sz, $Flags, mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) {
+ Uint flags;
+ Uint size;
+ Eterm ms;
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ flags = $FlagsAndUnit;
+ ms = $Ms;
+ $BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size);
+ if (size >= SMALL_BITS) {
+ Uint wordsneeded;
+ /* Check bits size before potential gc.
+ * We do not want a gc and then realize we don't need
+ * the allocated space (i.e. if the op fails).
+ *
+ * Remember to re-acquire the matchbuffer after gc.
+ */
+
+ mb = ms_matchbuffer(ms);
+ if (mb->size - mb->offset < size) {
+ $FAIL($Fail);
+ }
+ wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ $GC_TEST_PRESERVE(wordsneeded, $Live, ms);
+ }
+ mb = ms_matchbuffer(ms);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, size, flags, mb);
+ LIGHT_SWAPIN;
+ HEAP_SPACE_VERIFIED(0);
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_utf8(Ctx, Fail, Dst) {
+ Eterm result;
+ ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+
+ if (mb->size - mb->offset < 8) {
+ $FAIL($Fail);
+ }
+ if (BIT_OFFSET(mb->offset) != 0) {
+ result = erts_bs_get_utf8(mb);
+ } else {
+ byte b = mb->base[BYTE_OFFSET(mb->offset)];
+ if (b < 128) {
+ result = make_small(b);
+ mb->offset += 8;
+ } else {
+ result = erts_bs_get_utf8(mb);
+ }
+ }
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+i_bs_get_utf16(Ctx, Fail, Flags, Dst) {
+ ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx);
+ Eterm result = erts_bs_get_utf16(mb, $Flags);
+
+ if (is_non_value(result)) {
+ $FAIL($Fail);
+ }
+ $Dst = result;
+}
+
+bs_context_to_binary := ctx_to_bin.fetch.execute;
+i_bs_get_binary_all_reuse := ctx_to_bin.fetch_bin.execute;
+
+ctx_to_bin.head() {
+ Eterm context;
+ ErlBinMatchBuffer* mb;
+ Uint size;
+ Uint offs;
+}
+
+ctx_to_bin.fetch(Src) {
+ context = $Src;
+ if (is_boxed(context) &&
+ header_is_bin_matchstate(*boxed_val(context))) {
+ ErlBinMatchState* ms;
+ ms = (ErlBinMatchState *) boxed_val(context);
+ mb = &ms->mb;
+ offs = ms->save_offset[0];
+ size = mb->size - offs;
+ } else {
+ $NEXT0();
+ }
+}
+
+ctx_to_bin.fetch_bin(Src, Fail, Unit) {
+ context = $Src;
+ mb = ms_matchbuffer(context);
+ size = mb->size - mb->offset;
+ if (size % $Unit != 0) {
+ $FAIL($Fail);
+ }
+ offs = mb->offset;
+}
+
+ctx_to_bin.execute() {
+ Uint hole_size;
+ Uint orig = mb->orig;
+ ErlSubBin* sb = (ErlSubBin *) boxed_val(context);
+ /* Since we're going to overwrite the match state with the result, an
+ * ErlBinMatchState must be at least as large as an ErlSubBin. */
+ ERTS_CT_ASSERT(sizeof(ErlSubBin) <= sizeof(ErlBinMatchState));
+ hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(size);
+ sb->bitsize = BIT_OFFSET(size);
+ sb->offs = BYTE_OFFSET(offs);
+ sb->bitoffs = BIT_OFFSET(offs);
+ sb->is_writable = 0;
+ sb->orig = orig;
+ if (hole_size) {
+ /* Plug the words of the match state that the sub binary does
+ * not use with a dummy bignum header, keeping the heap walkable. */
+ sb[1].thing_word = make_pos_bignum_header(hole_size-1);
+ }
+}
+
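+// Match a literal bit string against the match context. When both the
+// match position and the length are byte aligned, sys_memcmp() can be
+// used; otherwise the general bit comparator does the work.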
+i_bs_match_string(Ctx, Fail, Bits, Ptr) {
+ byte* bytes = (byte *) $Ptr;
+ Uint bits = $Bits;
+ ErlBinMatchBuffer* mb;
+ Uint offs;
+
+ mb = ms_matchbuffer($Ctx);
+ if (mb->size - mb->offset < bits) {
+ $FAIL($Fail);
+ }
+ offs = mb->offset & 7;
+ if (offs == 0 && (bits & 7) == 0) {
+ if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
+ $FAIL($Fail);
+ }
+ } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
+ $FAIL($Fail);
+ }
+ mb->offset += bits;
+}
+
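+// Save or restore the current match position in one of the match
+// state's save slots; the compiler uses these to backtrack when a
+// match path fails.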
+i_bs_save2(Src, Slot) {
+ ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ _ms->save_offset[$Slot] = _ms->mb.offset;
+}
+
+i_bs_restore2(Src, Slot) {
+ ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src);
+ _ms->mb.offset = _ms->save_offset[$Slot];
+}
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ec6267711b..50352b4084 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -34,8 +34,8 @@
# define CIX_TRACE(text)
#endif
-erts_smp_atomic32_t the_active_code_index;
-erts_smp_atomic32_t the_staging_code_index;
+erts_atomic32_t the_active_code_index;
+erts_atomic32_t the_staging_code_index;
static Process* code_writing_process = NULL;
struct code_write_queue_item {
@@ -43,7 +43,7 @@ struct code_write_queue_item {
struct code_write_queue_item* next;
};
static struct code_write_queue_item* code_write_queue = NULL;
-static erts_smp_mtx_t code_write_permission_mtx;
+static erts_mtx_t code_write_permission_mtx;
#ifdef ERTS_ENABLE_LOCK_CHECK
static erts_tsd_key_t has_code_write_permission;
@@ -55,9 +55,10 @@ void erts_code_ix_init(void)
* single threaded with active and staging set both to zero.
* Preloading is finished by a commit that will set things straight.
*/
- erts_smp_atomic32_init_nob(&the_active_code_index, 0);
- erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
- erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+ erts_atomic32_init_nob(&the_active_code_index, 0);
+ erts_atomic32_init_nob(&the_staging_code_index, 0);
+ erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_key_create(&has_code_write_permission,
"erts_has_code_write_permission");
@@ -90,9 +91,9 @@ void erts_commit_staging_code_ix(void)
/* We need to this lock as we are now making the staging export table active */
export_staging_lock();
ix = erts_staging_code_ix();
- erts_smp_atomic32_set_nob(&the_active_code_index, ix);
+ erts_atomic32_set_nob(&the_active_code_index, ix);
ix = (ix + 1) % ERTS_NUM_CODE_IX;
- erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
+ erts_atomic32_set_nob(&the_staging_code_index, ix);
export_staging_unlock();
erts_tracer_nif_clear();
CIX_TRACE("activate");
@@ -114,12 +115,10 @@ void erts_abort_staging_code_ix(void)
int erts_try_seize_code_write_permission(Process* c_p)
{
int success;
-#ifdef ERTS_SMP
- ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
-#endif
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
ASSERT(c_p != NULL);
- erts_smp_mtx_lock(&code_write_permission_mtx);
+ erts_mtx_lock(&code_write_permission_mtx);
success = (code_writing_process == NULL);
if (success) {
code_writing_process = c_p;
@@ -137,21 +136,21 @@ int erts_try_seize_code_write_permission(Process* c_p)
code_write_queue = qitem;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
}
- erts_smp_mtx_unlock(&code_write_permission_mtx);
+ erts_mtx_unlock(&code_write_permission_mtx);
return success;
}
void erts_release_code_write_permission(void)
{
- erts_smp_mtx_lock(&code_write_permission_mtx);
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ erts_mtx_lock(&code_write_permission_mtx);
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
while (code_write_queue != NULL) { /* unleash the entire herd */
struct code_write_queue_item* qitem = code_write_queue;
- erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(qitem->p)) {
erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
code_write_queue = qitem->next;
erts_proc_dec_refc(qitem->p);
erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
@@ -160,7 +159,7 @@ void erts_release_code_write_permission(void)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_set(has_code_write_permission, (void *) 0);
#endif
- erts_smp_mtx_unlock(&code_write_permission_mtx);
+ erts_mtx_unlock(&code_write_permission_mtx);
}
#ifdef ERTS_ENABLE_LOCK_CHECK
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index 584a605771..42976d2301 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,12 +56,55 @@
# endif
# include "sys.h"
#endif
+
+#include "beam_opcodes.h"
+
struct process;
#define ERTS_NUM_CODE_IX 3
typedef unsigned ErtsCodeIndex;
+typedef struct ErtsCodeMFA_ {
+ Eterm module;
+ Eterm function;
+ Uint arity;
+} ErtsCodeMFA;
+
+/*
+ * The ErtsCodeInfo structure is used both in the Export entry
+ * and in the code as the function header.
+ */
+
+/* If you change the size of this, you also have to update the code
+ in ops.tab to reflect the new func_info size */
+typedef struct ErtsCodeInfo_ {
+ BeamInstr op; /* OpCode(i_func_info) */
+ union {
+ struct generic_bp* gen_bp; /* Trace breakpoint */
+#ifdef HIPE
+ void (*ncallee)(void);
+ struct hipe_call_count* hcc;
+#endif
+ }u;
+ ErtsCodeMFA mfa;
+} ErtsCodeInfo;
+
+/* Get the code associated with an ErtsCodeInfo ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci);
+
+/* Get the ErtsCodeInfo from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I);
+
+/* Get the code associated with an ErtsCodeMFA ptr. */
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa);
+
+/* Get the ErtsCodeMFA from a code ptr. */
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I);
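+
+/* These conversions rely on the code layout: an ErtsCodeInfo header
+ * (ending in its ErtsCodeMFA member) is placed immediately before the
+ * first instruction of the function it describes. */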
/* Called once at emulator initialization.
*/
@@ -121,20 +164,57 @@ void erts_abort_staging_code_ix(void);
int erts_has_code_write_permission(void);
#endif
-
+/* module/function/arity can be NIL/NIL/-1 when the MFA points to some
+ invalid code, for instance unloaded_fun. */
+#define ASSERT_MFA(MFA) \
+ ASSERT((is_atom((MFA)->module) || is_nil((MFA)->module)) && \
+ (is_atom((MFA)->function) || is_nil((MFA)->function)) && \
+ (((MFA)->arity >= 0 && (MFA)->arity < 1024) || (MFA)->arity == -1))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-extern erts_smp_atomic32_t the_active_code_index;
-extern erts_smp_atomic32_t the_staging_code_index;
+ERTS_GLB_INLINE
+BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci)
+{
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return (BeamInstr*)(ci + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I)
+{
+ ErtsCodeInfo *ci = ((ErtsCodeInfo *)(((char *)(I)) - sizeof(ErtsCodeInfo)));
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op);
+ ASSERT_MFA(&ci->mfa);
+ return ci;
+}
+
+ERTS_GLB_INLINE
+BeamInstr *erts_codemfa_to_code(ErtsCodeMFA *mfa)
+{
+ ASSERT_MFA(mfa);
+ return (BeamInstr*)(mfa + 1);
+}
+
+ERTS_GLB_INLINE
+ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I)
+{
+ ErtsCodeMFA *mfa = ((ErtsCodeMFA *)(((char *)(I)) - sizeof(ErtsCodeMFA)));
+ ASSERT_MFA(mfa);
+ return mfa;
+}
+
+extern erts_atomic32_t the_active_code_index;
+extern erts_atomic32_t the_staging_code_index;
ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void)
{
- return erts_smp_atomic32_read_nob(&the_active_code_index);
+ return erts_atomic32_read_nob(&the_active_code_index);
}
ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void)
{
- return erts_smp_atomic32_read_nob(&the_staging_code_index);
+ return erts_atomic32_read_nob(&the_staging_code_index);
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index ccc4cbad43..e7bd046e18 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +40,8 @@ static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int);
/*
* Copy object "obj" to process p.
*/
-Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
+Eterm copy_object_x(Eterm obj, Process* to, Uint extra)
+{
if (!is_immed(obj)) {
Uint size = size_object(obj);
Eterm* hp = HAllocX(to, size, extra);
@@ -70,33 +71,46 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) {
* Return the "flat" size of the object.
*/
-Uint size_object(Eterm obj)
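+/* Does PTR point into the literal area currently being purged?
+ * Literals inside that area must be copied, since their memory is
+ * about to be released; all other literals are shared by reference. */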
+#define in_literal_purge_area(PTR) \
+ (lit_purge_ptr && ( \
+ (lit_purge_ptr <= (PTR) && \
+ (PTR) < (lit_purge_ptr + lit_purge_sz))))
+
+Uint size_object_x(Eterm obj, erts_literal_area_t *litopt)
{
Uint sum = 0;
Eterm* ptr;
int arity;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
-
DECLARE_ESTACK(s);
-
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj));
for (;;) {
switch (primary_tag(obj)) {
case TAG_PRIMARY_LIST:
- sum += 2;
ptr = list_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ sum += 2;
obj = *ptr++;
if (!IS_CONST(obj)) {
ESTACK_PUSH(s, obj);
- }
+ }
obj = *ptr;
break;
case TAG_PRIMARY_BOXED:
{
- Eterm hdr = *boxed_val(obj);
+ Eterm hdr;
+ ptr = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) {
+ goto pop_next;
+ }
+ hdr = *ptr;
ASSERT(is_header(hdr));
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -272,17 +286,8 @@ do { \
(dst) = result; \
} while(0)
-#define BOXED_VISITED_MASK ((Eterm) 3)
-#define BOXED_VISITED ((Eterm) 1)
-#define BOXED_SHARED_UNPROCESSED ((Eterm) 2)
-#define BOXED_SHARED_PROCESSED ((Eterm) 3)
-
#define COUNT_OFF_HEAP (0)
-#define IN_LITERAL_PURGE_AREA(info, ptr) \
- ((info)->range_ptr && ( \
- (info)->range_ptr <= (ptr) && \
- (ptr) < ((info)->range_ptr + (info)->range_sz)))
/*
* Return the real size of an object and find sharing information
* This currently returns the same as erts_debug:size/1.
@@ -599,14 +604,14 @@ cleanup:
/*
* Copy a structure to a heap.
*/
-Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz)
+Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt)
{
char* hstart;
Uint hsize;
Eterm* htop;
Eterm* hbot;
Eterm* hp;
- Eterm* objp;
+ Eterm* ERTS_RESTRICT objp;
Eterm* tp;
Eterm res;
Eterm elem;
@@ -616,6 +621,8 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
Eterm hdr;
Eterm *hend;
int i;
+ Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL;
+ Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0;
#ifdef DEBUG
Eterm org_obj = obj;
Uint org_sz = sz;
@@ -651,7 +658,6 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy:
while (hp != htop) {
obj = *hp;
-
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1:
hp++;
@@ -667,6 +673,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_list:
tailp = argp;
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
for (;;) {
tp = tailp;
elem = CAR(objp);
@@ -674,18 +684,23 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
hbot -= 2;
CAR(hbot) = elem;
tailp = &CDR(hbot);
- }
- else {
+ } else {
CAR(htop) = elem;
tailp = &CDR(htop);
htop += 2;
}
*tp = make_list(tailp - 1);
obj = CDR(objp);
+
if (!is_list(obj)) {
break;
}
objp = list_val(obj);
+
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *tailp = obj;
+ goto L_copy;
+ }
}
switch (primary_tag(obj)) {
case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
@@ -695,7 +710,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
"%s, line %d: Internal error in copy_struct: 0x%08x\n",
__FILE__, __LINE__,obj);
}
-
+
case TAG_PRIMARY_BOXED:
if (ErtsInArea(boxed_val(obj),hstart,hsize)) {
hp++;
@@ -705,6 +720,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
L_copy_boxed:
objp = boxed_val(obj);
+ if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) {
+ *argp = obj;
+ break;
+ }
hdr = *objp;
switch (hdr & _TAG_HEADER_MASK) {
case ARITYVAL_SUBTAG:
@@ -742,7 +761,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
}
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -765,7 +784,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
extra_bytes = 1;
} else {
extra_bytes = 0;
- }
+ }
real_size = size+extra_bytes;
objp = binary_val(real_bin);
if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
@@ -780,7 +799,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
} else {
ProcBin* from = (ProcBin *) objp;
ProcBin* to;
-
+
ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
if (from->flags) {
erts_emasculate_writable_binary(from);
@@ -790,7 +809,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -834,20 +853,24 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
{
- ExternalThing *etp = (ExternalThing *) htop;
-
+ ExternalThing *etp = (ExternalThing *) objp;
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ L_off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) htop;
i = thing_arityval(hdr) + 1;
+ *argp = make_boxed(htop);
tp = htop;
while (i--) {
*htop++ = *objp++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*)etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
- *argp = make_external(tp);
}
break;
case MAP_SUBTAG:
@@ -875,6 +898,13 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case BIN_MATCHSTATE_SUBTAG:
erts_exit(ERTS_ABORT_EXIT,
"copy_struct: matchstate term not allowed");
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(objp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto L_off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
i = thing_arityval(hdr)+1;
hbot -= i;
@@ -900,6 +930,12 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
*bsz = hend - hbot;
} else {
#ifdef DEBUG
+ if (!eq(org_obj, res)) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " not equal to copy %T\n",
+ org_obj, res);
+ }
if (htop != hbot)
erts_exit(ERTS_ABORT_EXIT,
"Internal error in copy_struct() when copying %T:"
@@ -1036,6 +1072,9 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
Uint e;
unsigned sz;
Eterm* ptr;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
#endif
@@ -1081,7 +1120,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1132,7 +1171,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info)
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj));
- if (IN_LITERAL_PURGE_AREA(info,ptr))
+ if (copy_literals || in_literal_purge_area(ptr))
info->literal_size += size_object(obj);
goto pop_next;
}
@@ -1298,6 +1337,9 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
Eterm* resp;
Eterm *hbot, *hend;
unsigned remaining;
+ Eterm *lit_purge_ptr = info->lit_purge_ptr;
+ Uint lit_purge_sz = info->lit_purge_sz;
+ int copy_literals = info->copy_literals;
#ifdef DEBUG
Eterm mypid = erts_get_current_pid();
Eterm saved_obj = obj;
@@ -1347,11 +1389,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = list_val(obj);
/* off heap list pointers are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1415,11 +1457,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
ptr = boxed_val(obj);
/* off heap pointers to boxes are copied verbatim */
if (erts_is_literal(obj,ptr)) {
- if (!IN_LITERAL_PURGE_AREA(info,ptr)) {
+ if (!(copy_literals || in_literal_purge_area(ptr))) {
*resp = obj;
} else {
Uint bsz = 0;
- *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz);
+ *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */
hbot -= bsz;
}
goto cleanup_next;
@@ -1545,7 +1587,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
while (sz-- > 0) {
*hp++ = *ptr++;
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -1604,7 +1646,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -1615,20 +1657,33 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
}
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
- case EXTERNAL_REF_SUBTAG: {
- ExternalThing *etp = (ExternalThing *) hp;
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing *etp = (ExternalThing *) ptr;
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ off_heap_node_container_common:
+ {
+ struct erl_off_heap_header *ohhp;
+ ohhp = (struct erl_off_heap_header *) hp;
sz = thing_arityval(hdr);
- *resp = make_external(hp);
+ *resp = make_boxed(hp);
*hp++ = hdr;
ptr++;
while (sz-- > 0) {
*hp++ = *ptr++;
}
- etp->next = off_heap->first;
- off_heap->first = (struct erl_off_heap_header*) etp;
- erts_refc_inc(&etp->node->refc, 2);
+ ohhp->next = off_heap->first;
+ off_heap->first = ohhp;
goto cleanup_next;
}
+ case REF_SUBTAG:
+ if (is_magic_ref_thing(ptr)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_node_container_common;
+ }
+ /* Fall through... */
default:
sz = thing_arityval(hdr);
*resp = make_boxed(hp);
@@ -1768,7 +1823,8 @@ all_clean:
*
* NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
*/
-Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
+Eterm copy_shallow(Eterm* ERTS_RESTRICT ptr, Uint sz, Eterm** hpp,
+ ErlOffHeap* off_heap)
{
Eterm* tp = ptr;
Eterm* hp = *hpp;
@@ -1794,7 +1850,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case REFC_BINARY_SUBTAG:
{
ProcBin* pb = (ProcBin *) (tp-1);
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
goto off_heap_common;
@@ -1825,6 +1881,15 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
off_heap->first = ohh;
}
break;
+ case REF_SUBTAG: {
+ ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
+ if (is_magic_ref_thing(rtp)) {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
+ goto off_heap_common;
+ }
+ /* Fall through... */
+ }
default:
{
int tari = header_arity(val);
@@ -1923,8 +1988,11 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
if (is_header(val)) {
struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp;
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, hp, &dummy_ref);
+ ptr = move_boxed(ptr, val, &hp, &dummy_ref);
switch (val & _HEADER_SUBTAG_MASK) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hdr))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -1937,7 +2005,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, hp, &dummy_ref);
+ move_cons(ptr, val, &hp, &dummy_ref);
ptr += 2;
}
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 09c83f1117..0633bff3c2 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@
/* define this to get a lot of debug output */
/* #define ERTS_DIST_MSG_DBG */
+/* #define ERTS_RAW_DIST_MSG_DBG */
#ifdef HAVE_CONFIG_H
# include "config.h"
@@ -44,6 +45,7 @@
#include "erl_binary.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#include "erl_proc_sig_queue.h"
#define DIST_CTL_DEFAULT_SIZE 64
@@ -102,35 +104,27 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
-#define PASS_THROUGH 'p' /* This code should go */
-
int erts_is_alive; /* System must be blocked on change */
int erts_dist_buf_busy_limit;
/* distribution trap functions */
-Export* dsend2_trap = NULL;
-Export* dsend3_trap = NULL;
-/*Export* dsend_nosuspend_trap = NULL;*/
-Export* dlink_trap = NULL;
-Export* dunlink_trap = NULL;
Export* dmonitor_node_trap = NULL;
-Export* dgroup_leader_trap = NULL;
-Export* dexit_trap = NULL;
-Export* dmonitor_p_trap = NULL;
/* local variables */
-
+static Export *dist_ctrl_put_data_trap;
/* forward declarations */
-static void clear_dist_entry(DistEntry*);
static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy);
static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
+static Sint abort_connection(DistEntry* dep, Uint32 conn_id);
+static ErtsDistOutputBuf* clear_de_out_queues(DistEntry*);
+static void free_de_out_queues(DistEntry*, ErtsDistOutputBuf*);
-static erts_smp_atomic_t no_caches;
-static erts_smp_atomic_t no_nodes;
+static erts_atomic_t no_caches;
+static erts_atomic_t no_nodes;
struct {
Eterm reason;
@@ -143,8 +137,8 @@ delete_cache(ErtsAtomCache *cache)
{
if (cache) {
erts_free(ERTS_ALC_T_DCACHE, (void *) cache);
- ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0);
- erts_smp_atomic_dec_nob(&no_caches);
+ ASSERT(erts_atomic_read_nob(&no_caches) > 0);
+ erts_atomic_dec_nob(&no_caches);
}
}
@@ -155,14 +149,12 @@ create_cache(DistEntry *dep)
int i;
ErtsAtomCache *cp;
- ERTS_SMP_LC_ASSERT(
- is_internal_port(dep->cid)
- && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
+ ERTS_LC_ASSERT(is_nil(dep->cid));
ASSERT(!dep->cache);
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
sizeof(ErtsAtomCache));
- erts_smp_atomic_inc_nob(&no_caches);
+ erts_atomic_inc_nob(&no_caches);
for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) {
cp->in_arr[i] = THE_NON_VALUE;
cp->out_arr[i] = THE_NON_VALUE;
@@ -171,15 +163,17 @@ create_cache(DistEntry *dep)
Uint erts_dist_cache_size(void)
{
- return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache);
+ return (Uint) erts_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache);
}
static ErtsProcList *
-get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
+get_suspended_on_de(DistEntry *dep, erts_aint32_t unset_qflgs)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&dep->qlock));
- dep->qflgs &= ~unset_qflgs;
- if (dep->qflgs & ERTS_DE_QFLG_EXIT) {
+ erts_aint32_t qflgs;
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock));
+ qflgs = erts_atomic32_read_band_acqb(&dep->qflgs, ~unset_qflgs);
+ qflgs &= ~unset_qflgs;
+ if (qflgs & ERTS_DE_QFLG_EXIT) {
/* No resume when exit has been scheduled */
return NULL;
}
@@ -191,6 +185,161 @@ get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
}
}
+#define ERTS_MON_LNK_FIRE_LIMIT 100
+
+static void monitor_connection_down(ErtsMonitor *mon, void *unused)
+{
+ if (erts_monitor_is_origin(mon))
+ erts_proc_sig_send_demonitor(mon);
+ else
+ erts_proc_sig_send_monitor_down(mon, am_noconnection);
+}
+
+static void link_connection_down(ErtsLink *lnk, void *vdist)
+{
+ erts_proc_sig_send_link_exit(NULL, THE_NON_VALUE, lnk,
+ am_noconnection, NIL);
+}
+
+typedef enum {
+ ERTS_CML_CLEANUP_STATE_LINKS,
+ ERTS_CML_CLEANUP_STATE_MONITORS,
+ ERTS_CML_CLEANUP_STATE_ONAME_MONITORS,
+ ERTS_CML_CLEANUP_STATE_NODE_MONITORS
+} ErtsConMonLnkCleaupState;
+
+typedef struct {
+ ErtsConMonLnkCleaupState state;
+ ErtsMonLnkDist *dist;
+ void *yield_state;
+ int trigger_node_monitors;
+ Eterm nodename;
+ Eterm visability;
+ Eterm reason;
+ ErlOffHeap oh;
+ Eterm heap[1]; /* Variable-size area; a non-immediate 'reason' term
+ * is copied here (see the size calculation in
+ * schedule_con_monitor_link_cleanup()). */
+} ErtsConMonLnkCleanup;
+
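+/*
+ * Deliver exit/down signals for all links and monitors of a lost
+ * connection. The work is done in bounded chunks of at most
+ * ERTS_MON_LNK_FIRE_LIMIT entries; when the limit is hit, the cleanup
+ * saves its state and reschedules itself as aux work, so that no
+ * scheduler is blocked for long.
+ */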
+static void
+con_monitor_link_cleanup(void *vcmlcp)
+{
+ ErtsConMonLnkCleanup *cmlcp = vcmlcp;
+ ErtsMonLnkDist *dist = cmlcp->dist;
+ ErtsSchedulerData *esdp;
+ int yield;
+
+ switch (cmlcp->state) {
+ case ERTS_CML_CLEANUP_STATE_LINKS:
+ yield = erts_link_list_foreach_delete_yielding(&dist->links,
+ link_connection_down,
+ NULL, &cmlcp->yield_state,
+ ERTS_MON_LNK_FIRE_LIMIT);
+ if (yield)
+ break;
+
+ ASSERT(!cmlcp->yield_state);
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_MONITORS;
+ case ERTS_CML_CLEANUP_STATE_MONITORS:
+ yield = erts_monitor_list_foreach_delete_yielding(&dist->monitors,
+ monitor_connection_down,
+ NULL, &cmlcp->yield_state,
+ ERTS_MON_LNK_FIRE_LIMIT);
+ if (yield)
+ break;
+
+ ASSERT(!cmlcp->yield_state);
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_ONAME_MONITORS;
+ case ERTS_CML_CLEANUP_STATE_ONAME_MONITORS:
+ yield = erts_monitor_tree_foreach_delete_yielding(&dist->orig_name_monitors,
+ monitor_connection_down,
+ NULL, &cmlcp->yield_state,
+ ERTS_MON_LNK_FIRE_LIMIT/2);
+ if (yield)
+ break;
+
+ cmlcp->dist = NULL;
+ erts_mon_link_dist_dec_refc(dist);
+
+ ASSERT(!cmlcp->yield_state);
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_NODE_MONITORS;
+ case ERTS_CML_CLEANUP_STATE_NODE_MONITORS:
+ if (cmlcp->trigger_node_monitors) {
+ send_nodes_mon_msgs(NULL,
+ am_nodedown,
+ cmlcp->nodename,
+ cmlcp->visability,
+ cmlcp->reason);
+ }
+ erts_cleanup_offheap(&cmlcp->oh);
+ erts_free(ERTS_ALC_T_CML_CLEANUP, vcmlcp);
+ return; /* done */
+ }
+
+ /* yield... */
+
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
+ erts_schedule_misc_aux_work((int) esdp->no,
+ con_monitor_link_cleanup,
+ (void *) cmlcp);
+}
+
+static void
+schedule_con_monitor_link_cleanup(ErtsMonLnkDist *dist,
+ Eterm nodename,
+ Eterm visability,
+ Eterm reason)
+{
+ if (dist || is_value(nodename)) {
+ ErtsSchedulerData *esdp;
+ ErtsConMonLnkCleanup *cmlcp;
+ Uint rsz, size;
+
+ size = sizeof(ErtsConMonLnkCleanup);
+
+ if (is_non_value(reason) || is_immed(reason)) {
+ rsz = 0;
+ size -= sizeof(Eterm);
+ }
+ else {
+ rsz = size_object(reason);
+ size += sizeof(Eterm) * (rsz - 1);
+ }
+
+ cmlcp = erts_alloc(ERTS_ALC_T_CML_CLEANUP, size);
+
+ ERTS_INIT_OFF_HEAP(&cmlcp->oh);
+
+ cmlcp->yield_state = NULL;
+ cmlcp->dist = dist;
+ if (!dist)
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_NODE_MONITORS;
+ else {
+ cmlcp->state = ERTS_CML_CLEANUP_STATE_LINKS;
+ erts_mtx_lock(&dist->mtx);
+ ASSERT(dist->alive);
+ dist->alive = 0;
+ erts_mtx_unlock(&dist->mtx);
+ }
+
+ cmlcp->trigger_node_monitors = is_value(nodename);
+ cmlcp->nodename = nodename;
+ cmlcp->visability = visability;
+ if (rsz == 0)
+ cmlcp->reason = reason;
+ else {
+ Eterm *hp = &cmlcp->heap[0];
+ cmlcp->reason = copy_struct(reason, rsz, &hp, &cmlcp->oh);
+ }
+
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
+ erts_schedule_misc_aux_work((int) esdp->no,
+ con_monitor_link_cleanup,
+ (void *) cmlcp);
+ }
+}
+
/*
** A full node name consists of "n@h"
**
@@ -242,185 +391,22 @@ int is_node_name_atom(Eterm a)
return is_node_name((char*)atom_tab(i)->name, atom_tab(i)->len);
}
-typedef struct {
- DistEntry *dep;
- Eterm *lhp;
-} NetExitsContext;
-
-/*
-** This function is called when a distribution
-** port or process terminates
-*/
-static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
-{
- Process *rp;
- ErtsMonitor *rmon;
- DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
-
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
- if (!rp)
- goto done;
-
- if (mon->type == MON_ORIGIN) {
- /* local pid is beeing monitored */
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- /* ASSERT(rmon != NULL); nope, can happen during process exit */
- if (rmon != NULL) {
- erts_destroy_monitor(rmon);
- }
- } else {
- DeclareTmpHeapNoproc(lhp,3);
- Eterm watched;
- UseTmpHeapNoproc(3);
- ASSERT(mon->type == MON_TARGET);
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- /* ASSERT(rmon != NULL); can happen during process exit */
- if (rmon != NULL) {
- ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
- watched = (is_atom(rmon->name)
- ? TUPLE2(lhp, rmon->name, dep->sysname)
- : rmon->pid);
-#ifdef ERTS_SMP
- rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
-#endif
- erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
- watched, am_noconnection);
- erts_destroy_monitor(rmon);
- }
- UnUseTmpHeapNoproc(3);
- }
- erts_smp_proc_unlock(rp, rp_locks);
- done:
- erts_destroy_monitor(mon);
-}
-
-typedef struct {
- NetExitsContext *necp;
- ErtsLink *lnk;
-} LinkNetExitsContext;
-
-/*
-** This is the function actually doing the job of sending exit messages
-** for links in a dist entry upon net_exit (the node goes down), NB,
-** only process links, not node monitors are handled here,
-** they reside in a separate tree....
-*/
-static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
-{
- ErtsLink *lnk = ((LinkNetExitsContext *) vlnecp)->lnk; /* the local pid */
- ErtsLink *rlnk;
- Process *rp;
-
- ASSERT(lnk->type == LINK_PID);
- if (is_internal_pid(lnk->pid)) {
- int xres;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
-
- rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
- if (!rp) {
- goto done;
- }
-
- rlnk = erts_remove_link(&ERTS_P_LINKS(rp), sublnk->pid);
- xres = erts_send_exit_signal(NULL,
- sublnk->pid,
- rp,
- &rp_locks,
- am_noconnection,
- NIL,
- NULL,
- 0);
-
- if (rlnk) {
- erts_destroy_link(rlnk);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- /* We didn't exit the process and it is traced */
- trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid);
- }
- }
- erts_smp_proc_unlock(rp, rp_locks);
- }
- done:
- erts_destroy_link(sublnk);
-
-}
-
-
-
-
-
-/*
-** This function is called when a distribution
-** port or process terminates, once for each link on the high level,
-** it in turn traverses the link subtree for the specific link node...
-*/
-static void doit_link_net_exits(ErtsLink *lnk, void *vnecp)
-{
- LinkNetExitsContext lnec = {(NetExitsContext *) vnecp, lnk};
- ASSERT(lnk->type == LINK_PID);
- erts_sweep_links(ERTS_LINK_ROOT(lnk), &doit_link_net_exits_sub, (void *) &lnec);
-#ifdef DEBUG
- ERTS_LINK_ROOT(lnk) = NULL;
-#endif
- erts_destroy_link(lnk);
-}
-
-
-static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
-{
- DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
- Eterm name = dep->sysname;
- Process *rp;
- ErtsLink *rlnk;
- Uint i,n;
- ASSERT(lnk->type == LINK_NODE);
- if (is_internal_pid(lnk->pid)) {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- ErlOffHeap *ohp;
- rp = erts_proc_lookup(lnk->pid);
- if (!rp)
- goto done;
- erts_smp_proc_lock(rp, rp_locks);
- rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name);
- if (rlnk != NULL) {
- ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE));
- erts_destroy_link(rlnk);
- }
- n = ERTS_LINK_REFC(lnk);
- for (i = 0; i < n; ++i) {
- Eterm tup;
- Eterm *hp;
- ErtsMessage *msgp;
-
- msgp = erts_alloc_message_heap(rp, &rp_locks,
- 3, &hp, &ohp);
- tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, rp_locks, msgp, tup, am_system);
- }
- erts_smp_proc_unlock(rp, rp_locks);
- }
- done:
- erts_destroy_link(lnk);
-}
-
static void
set_node_not_alive(void *unused)
{
ErlHeapFragment *bp;
Eterm nodename = erts_this_dist_entry->sysname;
- ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0);
+ ASSERT(erts_atomic_read_nob(&no_nodes) == 0);
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_set_this_node(am_Noname, 0);
erts_is_alive = 0;
send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason);
nodedown.reason = NIL;
bp = nodedown.bp;
nodedown.bp = NULL;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
if (bp)
free_message_buffer(bp);
}
@@ -428,7 +414,7 @@ set_node_not_alive(void *unused)
static ERTS_INLINE void
dec_no_nodes(void)
{
- erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes);
+ erts_aint_t no = erts_atomic_dec_read_mb(&no_nodes);
ASSERT(no >= 0);
ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */
if (no == 0)
@@ -441,12 +427,33 @@ static ERTS_INLINE void
inc_no_nodes(void)
{
#ifdef DEBUG
- erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes);
+ erts_aint_t no = erts_atomic_read_nob(&no_nodes);
ASSERT(erts_is_alive ? no > 0 : no == 0);
#endif
- erts_smp_atomic_inc_mb(&no_nodes);
+ erts_atomic_inc_mb(&no_nodes);
+}
+
+static void
+kill_dist_ctrl_proc(void *vpid)
+{
+ erts_proc_sig_send_exit(NULL, (Eterm) vpid, (Eterm) vpid,
+ am_kill, NIL, 0);
}
-
+
+static void
+schedule_kill_dist_ctrl_proc(Eterm pid)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int sched_id = 1;
+ if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp))
+ sched_id = 1;
+ else
+ sched_id = (int) esdp->no;
+ erts_schedule_misc_aux_work(sched_id,
+ kill_dist_ctrl_proc,
+ (void *) (UWord) pid);
+}
+
/*
* proc is currently running or exiting process.
*/
@@ -456,58 +463,83 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */
DistEntry *tdep;
- int no_dist_port = 0;
+ int no_dist_ctrl;
+ int no_pending;
Eterm nd_reason = (reason == am_no_network
? am_no_network
: am_net_kernel_terminated);
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ int i = 0;
+ Eterm *dist_ctrl;
+ DistEntry** pending;
+
+ ERTS_UNDEF(dist_ctrl, NULL);
+ ERTS_UNDEF(pending, NULL);
- for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next)
- no_dist_port++;
- for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next)
- no_dist_port++;
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
+
+ no_dist_ctrl = (erts_no_of_hidden_dist_entries +
+ erts_no_of_visible_dist_entries);
+ no_pending = erts_no_of_pending_dist_entries;
/* KILL all port controllers */
- if (no_dist_port == 0)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
- else {
- Eterm def_buf[128];
- int i = 0;
- Eterm *dist_port;
-
- if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0]))
- dist_port = &def_buf[0];
- else
- dist_port = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm)*no_dist_port);
+ if (no_dist_ctrl) {
+ dist_ctrl = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm)*no_dist_ctrl);
for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) {
- ASSERT(is_internal_port(tdep->cid));
- dist_port[i++] = tdep->cid;
+ ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid));
+ ASSERT(i < no_dist_ctrl);
+ dist_ctrl[i++] = tdep->cid;
}
for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) {
- ASSERT(is_internal_port(tdep->cid));
- dist_port[i++] = tdep->cid;
+ ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid));
+ ASSERT(i < no_dist_ctrl);
+ dist_ctrl[i++] = tdep->cid;
}
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
-
- for (i = 0; i < no_dist_port; i++) {
- Port *prt = erts_port_lookup(dist_port[i],
- ERTS_PORT_SFLGS_INVALID_LOOKUP);
- if (!prt)
- continue;
- ASSERT(erts_atomic32_read_nob(&prt->state)
- & ERTS_PORT_SFLG_DISTRIBUTION);
+ ASSERT(i == no_dist_ctrl);
+ }
+ if (no_pending) {
+ pending = erts_alloc(ERTS_ALC_T_TMP, sizeof(DistEntry*)*no_pending);
+ i = 0;
+ for (tdep = erts_pending_dist_entries; tdep; tdep = tdep->next) {
+ ASSERT(is_nil(tdep->cid));
+ ASSERT(i < no_pending);
+ pending[i++] = tdep;
+ erts_ref_dist_entry(tdep);
+ }
+ ASSERT(i == no_pending);
+ }
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+
+ if (no_dist_ctrl) {
+ for (i = 0; i < no_dist_ctrl; i++) {
+ if (is_internal_pid(dist_ctrl[i]))
+ schedule_kill_dist_ctrl_proc(dist_ctrl[i]);
+ else {
+ Port *prt = erts_port_lookup(dist_ctrl[i],
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt) {
+ ASSERT(erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_DISTRIBUTION);
+
+ erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
+ prt, dist_ctrl[i], nd_reason, NULL);
+ }
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, dist_ctrl);
+ }
- erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
- prt, dist_port[i], nd_reason, NULL);
- }
+ if (no_pending) {
+ for (i = 0; i < no_pending; i++) {
+ abort_connection(pending[i], pending[i]->connection_id);
+ erts_deref_dist_entry(pending[i]);
+ }
+ erts_free(ERTS_ALC_T_TMP, pending);
+ }
- if (dist_port != &def_buf[0])
- erts_free(ERTS_ALC_T_TMP, dist_port);
- }
/*
- * When last dist port exits, node will be taken
+ * When last dist ctrl exits, node will be taken
* from alive to not alive.
*/
ASSERT(is_nil(nodedown.reason) && !nodedown.bp);
@@ -524,65 +556,78 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
&nodedown.bp->off_heap);
}
}
- else { /* Call from distribution port */
- NetExitsContext nec = {dep};
- ErtsLink *nlinks;
- ErtsLink *node_links;
- ErtsMonitor *monitors;
+ else { /* Call from distribution controller (port/process) */
+ ErtsMonLnkDist *mld;
+ ErtsAtomCache *cache;
+ ErtsProcList *suspendees;
+ ErtsDistOutputBuf *obuf;
Uint32 flags;
- erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
- erts_smp_de_rwlock(dep);
+ erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
+ erts_de_rwlock(dep);
- ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
- && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
+ if (is_internal_port(dep->cid)) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
- if (erts_port_task_is_scheduled(&dep->dist_cmd))
- erts_port_task_abort(&dep->dist_cmd);
+ if (erts_port_task_is_scheduled(&dep->dist_cmd))
+ erts_port_task_abort(&dep->dist_cmd);
+ }
- if (dep->status & ERTS_DE_SFLG_EXITING) {
+ if (dep->state == ERTS_DE_STATE_EXITING) {
#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT);
- erts_smp_mtx_unlock(&dep->qlock);
+ ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT);
#endif
}
else {
- dep->status |= ERTS_DE_SFLG_EXITING;
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
- dep->qflgs |= ERTS_DE_QFLG_EXIT;
- erts_smp_mtx_unlock(&dep->qlock);
+ dep->state = ERTS_DE_STATE_EXITING;
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT));
+ erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_EXIT);
+ erts_mtx_unlock(&dep->qlock);
}
- erts_smp_de_links_lock(dep);
- monitors = dep->monitors;
- nlinks = dep->nlinks;
- node_links = dep->node_links;
- dep->monitors = NULL;
- dep->nlinks = NULL;
- dep->node_links = NULL;
- erts_smp_de_links_unlock(dep);
+ mld = dep->mld;
+ dep->mld = NULL;
nodename = dep->sysname;
flags = dep->flags;
+ erts_atomic_set_nob(&dep->input_handler, (erts_aint_t) NIL);
+ cache = dep->cache;
+ dep->cache = NULL;
+
+ erts_mtx_lock(&dep->qlock);
+
+ erts_atomic64_set_nob(&dep->in, 0);
+ erts_atomic64_set_nob(&dep->out, 0);
+
+ obuf = clear_de_out_queues(dep);
+ suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
+
+ erts_mtx_unlock(&dep->qlock);
+ erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
+ dep->send = NULL;
+
erts_set_dist_entry_not_connected(dep);
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
- erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec);
- erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec);
- erts_sweep_links(node_links, &doit_node_link_net_exits, (void *) &nec);
+ schedule_con_monitor_link_cleanup(mld,
+ nodename,
+ (flags & DFLAG_PUBLISHED
+ ? am_visible
+ : am_hidden),
+ (reason == am_normal
+ ? am_connection_closed
+ : reason));
- send_nodes_mon_msgs(NULL,
- am_nodedown,
- nodename,
- flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
- reason == am_normal ? am_connection_closed : reason);
+ erts_resume_processes(suspendees);
- clear_dist_entry(dep);
+ delete_cache(cache);
+ free_de_out_queues(dep, obuf);
+ if (dep->transcode_ctx)
+ transcode_free_ctx(dep);
}
dec_no_nodes();
@@ -596,6 +641,14 @@ trap_function(Eterm func, int arity)
return erts_export_put(am_erlang, func, arity);
}
+/*
+ * Sync with dist_util.erl:
+ *
+ * -record(erts_dflags,
+ * {default, mandatory, addable, rejectable, strict_order}).
+ */
+static Eterm erts_dflags_record;
+
void init_dist(void)
{
init_nodes_monitors();
@@ -603,19 +656,24 @@ void init_dist(void)
nodedown.reason = NIL;
nodedown.bp = NULL;
- erts_smp_atomic_init_nob(&no_nodes, 0);
- erts_smp_atomic_init_nob(&no_caches, 0);
+ erts_atomic_init_nob(&no_nodes, 0);
+ erts_atomic_init_nob(&no_caches, 0);
/* Lookup/Install all references to trap functions */
- dsend2_trap = trap_function(am_dsend,2);
- dsend3_trap = trap_function(am_dsend,3);
- /* dsend_nosuspend_trap = trap_function(am_dsend_nosuspend,2);*/
- dlink_trap = trap_function(am_dlink,1);
- dunlink_trap = trap_function(am_dunlink,1);
dmonitor_node_trap = trap_function(am_dmonitor_node,3);
- dgroup_leader_trap = trap_function(am_dgroup_leader,2);
- dexit_trap = trap_function(am_dexit, 2);
- dmonitor_p_trap = trap_function(am_dmonitor_p, 2);
+ dist_ctrl_put_data_trap = erts_export_put(am_erts_internal,
+ am_dist_ctrl_put_data,
+ 2);
+ {
+ Eterm* hp = erts_alloc(ERTS_ALC_T_LITERAL, (1+6)*sizeof(Eterm));
+ erts_dflags_record = TUPLE6(hp, am_erts_dflags,
+ make_small(DFLAG_DIST_DEFAULT),
+ make_small(DFLAG_DIST_MANDATORY),
+ make_small(DFLAG_DIST_ADDABLE),
+ make_small(DFLAG_DIST_REJECTABLE),
+ make_small(DFLAG_DIST_STRICT_ORDER));
+ erts_set_literal_tag(&erts_dflags_record, hp, (1+6));
+ }
}
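
A minimal sketch of how the literal tuple built above lines up with the Erlang record it
mirrors (tuple_val()/arityval() as in erl_term.h; the asserts are illustration only, not
part of the patch):

    Eterm *tp = tuple_val(erts_dflags_record);
    ASSERT(arityval(tp[0]) == 6);     /* record tag + 5 fields            */
    ASSERT(tp[1] == am_erts_dflags);  /* -record(erts_dflags, ...) tag    */
    /* tp[2]..tp[6]: default, mandatory, addable, rejectable, strict_order */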
#define ErtsDistOutputBuf2Binary(OB) \
@@ -627,10 +685,10 @@ alloc_dist_obuf(Uint size)
ErtsDistOutputBuf *obuf;
Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
Binary *bin = erts_bin_drv_alloc(obuf_size);
- erts_refc_init(&bin->refc, 1);
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
+ obuf->alloc_endp = obuf->data + size;
ASSERT(bin == ErtsDistOutputBuf2Binary(obuf));
#endif
return obuf;
@@ -641,8 +699,7 @@ free_dist_obuf(ErtsDistOutputBuf *obuf)
{
Binary *bin = ErtsDistOutputBuf2Binary(obuf);
ASSERT(obuf->dbg_pattern == ERTS_DIST_OUTPUT_BUF_DBG_PATTERN);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
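
erts_bin_release() presumably folds the removed decrement-and-free pair into a single
helper; a sketch of the equivalent, using exactly the pattern from the deleted lines:

    /* Hypothetical expansion of erts_bin_release(bin): */
    if (erts_refc_dectest(&bin->refc, 0) == 0)
        erts_bin_free(bin);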
static ERTS_INLINE Sint
@@ -652,26 +709,11 @@ size_obuf(ErtsDistOutputBuf *obuf)
return bin->orig_size;
}
-static void clear_dist_entry(DistEntry *dep)
+static ErtsDistOutputBuf* clear_de_out_queues(DistEntry* dep)
{
- Sint obufsize = 0;
- ErtsAtomCache *cache;
- ErtsProcList *suspendees;
ErtsDistOutputBuf *obuf;
- erts_smp_de_rwlock(dep);
- cache = dep->cache;
- dep->cache = NULL;
-
-#ifdef DEBUG
- erts_smp_de_links_lock(dep);
- ASSERT(!dep->nlinks);
- ASSERT(!dep->node_links);
- ASSERT(!dep->monitors);
- erts_smp_de_links_unlock(dep);
-#endif
-
- erts_smp_mtx_lock(&dep->qlock);
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock));
if (!dep->out_queue.last)
obuf = dep->finalized_out_queue.first;
@@ -680,21 +722,24 @@ static void clear_dist_entry(DistEntry *dep)
obuf = dep->out_queue.first;
}
+ if (dep->tmp_out_queue.first) {
+ dep->tmp_out_queue.last->next = obuf;
+ obuf = dep->tmp_out_queue.first;
+ }
+
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
+ dep->tmp_out_queue.first = NULL;
+ dep->tmp_out_queue.last = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- dep->status = 0;
- suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
- erts_smp_mtx_unlock(&dep->qlock);
- erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
- dep->send = NULL;
- erts_smp_de_rwunlock(dep);
-
- erts_resume_processes(suspendees);
+ return obuf;
+}
- delete_cache(cache);
+static void free_de_out_queues(DistEntry* dep, ErtsDistOutputBuf *obuf)
+{
+ Sint obufsize = 0;
while (obuf) {
ErtsDistOutputBuf *fobuf;
@@ -705,14 +750,15 @@ static void clear_dist_entry(DistEntry *dep)
}
if (obufsize) {
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize);
+ erts_atomic_add_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ erts_mtx_unlock(&dep->qlock);
}
}
-void erts_dsend_context_dtor(Binary* ctx_bin)
+int erts_dsend_context_dtor(Binary* ctx_bin)
{
ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
switch (ctx->dss.phase) {
@@ -727,8 +773,10 @@ void erts_dsend_context_dtor(Binary* ctx_bin)
if (ctx->dss.phase >= ERTS_DSIG_SEND_PHASE_ALLOC && ctx->dss.obuf) {
free_dist_obuf(ctx->dss.obuf);
}
- if (ctx->dep_to_deref)
- erts_deref_dist_entry(ctx->dep_to_deref);
+ if (ctx->deref_dep)
+ erts_deref_dist_entry(ctx->dep);
+
+ return 1;
}
Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
@@ -740,7 +788,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
Binary* ctx_bin = erts_create_magic_binary(sizeof(struct exported_ctx),
erts_dsend_context_dtor);
struct exported_ctx* dst = ERTS_MAGIC_BIN_DATA(ctx_bin);
- Eterm* hp = HAlloc(p, PROC_BIN_SIZE);
+ Eterm* hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
sys_memcpy(&dst->ctx, ctx, sizeof(ErtsSendContext));
ASSERT(ctx->dss.ctl == make_tuple(ctx->ctl_heap));
@@ -749,7 +797,7 @@ Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx)
sys_memcpy(&dst->acm, ctx->dss.acmp, sizeof(ErtsAtomCacheMap));
dst->ctx.dss.acmp = &dst->acm;
}
- return erts_mk_magic_binary_term(&hp, &MSO(p), ctx_bin);
+ return erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
}
@@ -794,9 +842,8 @@ erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
}
-/* A local process that's beeing monitored by a remote one exits. We send:
- {DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason},
- which is rather sad as only the ref is needed, no pid's... */
+/* A local process that's being monitored by a remote one exits. We send:
+ {DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason} */
int
erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
Eterm ref, Eterm reason)
@@ -805,17 +852,18 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
DeclareTmpHeapNoproc(ctl_heap,6);
int res;
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ /*
+ * Receiver does not support DOP_MONITOR_P_EXIT (see dsig_send_monitor)
+ */
+ return ERTS_DSIG_SEND_OK;
+ }
+
UseTmpHeapNoproc(6);
ctl = TUPLE5(&ctl_heap[0], make_small(DOP_MONITOR_P_EXIT),
watched, watcher, ref, reason);
-#ifdef DEBUG
- erts_smp_de_links_lock(dsdp->dep);
- ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref));
- erts_smp_de_links_unlock(dsdp->dep);
-#endif
-
res = dsig_send_ctl(dsdp, ctl, 1);
UnUseTmpHeapNoproc(6);
return res;
@@ -832,6 +880,16 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
DeclareTmpHeapNoproc(ctl_heap,5);
int res;
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ /*
+ * Receiver does not support DOP_MONITOR_P.
+ * Just avoid sending it, thereby reducing this monitor to merely
+ * supervising the connection. This works for simple c-nodes with a
+ * 1-to-1 relation between "Erlang process" and OS process.
+ */
+ return ERTS_DSIG_SEND_OK;
+ }
+
UseTmpHeapNoproc(5);
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_MONITOR_P),
@@ -844,8 +902,7 @@ erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
/* A local process monitoring a remote one wants to stop monitoring, either
because of a demonitor bif call or because the local process died. We send
- {DOP_DEMONITOR_P, Local pid, Remote pid or name, ref}, which is once again
- rather redundant as only the ref will be needed on the other side... */
+ {DOP_DEMONITOR_P, Local pid, Remote pid or name, ref} */
int
erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
Eterm watched, Eterm ref, int force)
@@ -854,6 +911,13 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
DeclareTmpHeapNoproc(ctl_heap,5);
int res;
+ if (~dsdp->flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)) {
+ /*
+ * Receiver does not support DOP_DEMONITOR_P (see dsig_send_monitor)
+ */
+ return ERTS_DSIG_SEND_OK;
+ }
+
UseTmpHeapNoproc(5);
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_DEMONITOR_P),
@@ -864,6 +928,24 @@ erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
return res;
}
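
The ~dsdp->flags & mask guard used by all three senders above is nonzero as soon as any
bit in the mask is absent; a worked illustration with a hypothetical flags value:

    /* If flags contains DFLAG_DIST_MONITOR but not DFLAG_DIST_MONITOR_NAME,
     * then ~flags still has DFLAG_DIST_MONITOR_NAME set, so
     *   ~flags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME) != 0
     * and the signal is silently skipped; both capability bits must be
     * present for the DOP_MONITOR_P family to be sent. */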
+static int can_send_seqtrace_token(ErtsSendContext* ctx, Eterm token) {
+ Eterm label;
+
+ if (ctx->dsd.flags & DFLAG_BIG_SEQTRACE_LABELS) {
+ /* The other end is capable of handling arbitrary seq_trace labels. */
+ return 1;
+ }
+
+ /* The other end only tolerates smalls, but since we could potentially be
+ * talking to an old 32-bit emulator from a 64-bit one, we have to check
+ * whether the label is small on any emulator. */
+ label = SEQ_TRACE_T_LABEL(token);
+
+ return is_small(label) &&
+ signed_val(label) <= (ERTS_SINT32_MAX >> _TAG_IMMED1_SIZE) &&
+ signed_val(label) >= (ERTS_SINT32_MIN >> _TAG_IMMED1_SIZE);
+}
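
A worked instance of the bound above, assuming _TAG_IMMED1_SIZE == 4 as in erl_term.h:

    /* ERTS_SINT32_MAX >> 4 ==  134217727  ( 2^27 - 1)
     * ERTS_SINT32_MIN >> 4 == -134217728  (-2^27)
     * i.e. the label must fit the 28-bit signed payload of a small on a
     * 32-bit emulator (32 bits minus 4 immediate tag bits). A 64-bit
     * emulator accepts much larger smalls, so checking against the 32-bit
     * bound is what keeps the token portable to old peers. */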
+
int
erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
{
@@ -897,18 +979,38 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx)
"%T", remote);
msize = size_object(message);
if (have_seqtrace(token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(token);
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
}
}
#endif
- if (token != NIL)
- ctl = TUPLE4(&ctx->ctl_heap[0],
- make_small(DOP_SEND_TT), am_Empty, remote, token);
- else
- ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_SEND), am_Empty, remote);
+ {
+ Eterm dist_op, sender_id;
+ int send_token;
+
+ send_token = (token != NIL && can_send_seqtrace_token(ctx, token));
+
+ if (ctx->dsd.flags & DFLAG_SEND_SENDER) {
+ dist_op = make_small(send_token ?
+ DOP_SEND_SENDER_TT :
+ DOP_SEND_SENDER);
+ sender_id = sender->common.id;
+ } else {
+ dist_op = make_small(send_token ?
+ DOP_SEND_TT :
+ DOP_SEND);
+ sender_id = am_Empty;
+ }
+
+ if (send_token) {
+ ctl = TUPLE4(&ctx->ctl_heap[0], dist_op, sender_id, remote, token);
+ } else {
+ ctl = TUPLE3(&ctx->ctl_heap[0], dist_op, sender_id, remote);
+ }
+ }
+
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
@@ -954,19 +1056,20 @@ erts_dsig_send_reg_msg(Eterm remote_name, Eterm message,
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
if (have_seqtrace(token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(token);
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
}
}
#endif
- if (token != NIL)
+ if (token != NIL && can_send_seqtrace_token(ctx, token))
ctl = TUPLE5(&ctx->ctl_heap[0], make_small(DOP_REG_SEND_TT),
sender->common.id, am_Empty, remote_name, token);
else
ctl = TUPLE4(&ctx->ctl_heap[0], make_small(DOP_REG_SEND),
sender->common.id, am_Empty, remote_name);
+
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
@@ -1018,7 +1121,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)),
"%T", reason);
if (have_seqtrace(token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(token);
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
}
@@ -1088,21 +1191,8 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
-#ifndef HAVE_VALGRIND_PRINTF_XML
-#define VALGRIND_PRINTF_XML VALGRIND_PRINTF
-#endif
-
# define PURIFY_MSG(msg) \
- do { \
- char buf__[1]; size_t bufsz__ = sizeof(buf__); \
- if (erts_sys_getenv_raw("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \
- VALGRIND_PRINTF_XML("<erlang_error_log>" \
- "%s, line %d: %s</erlang_error_log>\n", \
- __FILE__, __LINE__, msg); \
- } else { \
- VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg); \
- } \
- } while (0)
+ VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg)
#else
# define PURIFY_MSG(msg)
#endif
@@ -1119,13 +1209,13 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
int erts_net_message(Port *prt,
DistEntry *dep,
+ Uint32 conn_id,
byte *hbuf,
ErlDrvSizeT hlen,
byte *buf,
ErlDrvSizeT len)
{
ErtsDistExternal ede;
- byte *t;
Sint ctl_len;
Eterm arg;
Eterm from, to;
@@ -1141,8 +1231,6 @@ int erts_net_message(Port *prt,
Sint type;
Eterm token;
Eterm token_size;
- ErtsMonitor *mon;
- ErtsLink *lnk;
Uint tuple_arity;
int res;
#ifdef ERTS_DIST_MSG_DBG
@@ -1151,16 +1239,17 @@ int erts_net_message(Port *prt,
UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
if (!erts_is_alive) {
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
}
- if (hlen != 0)
- goto data_error;
+
+ ASSERT(hlen == 0);
+
if (len == 0) { /* HANDLE TICK !!! */
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
@@ -1171,38 +1260,31 @@ int erts_net_message(Port *prt,
bw(buf, len);
#endif
- if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
- t = buf;
- else {
- /* Skip PASS_THROUGH */
- t = buf+1;
- len--;
- }
+ res = erts_prepare_dist_ext(&ede, buf, len, dep, conn_id, dep->cache);
- if (len == 0) {
- PURIFY_MSG("data error");
- goto data_error;
- }
-
- res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache);
-
- if (res >= 0)
- res = ctl_len = erts_decode_dist_ext_size(&ede);
- else {
+ switch (res) {
+ case ERTS_PREP_DIST_EXT_CLOSED:
+ return 0; /* Connection not alive; ignore signal... */
+ case ERTS_PREP_DIST_EXT_FAILED:
#ifdef ERTS_DIST_MSG_DBG
erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
bw(buf, orig_len);
#endif
- ctl_len = 0;
- }
-
- if (res < 0) {
+ goto data_error;
+ case ERTS_PREP_DIST_EXT_SUCCESS:
+ ctl_len = erts_decode_dist_ext_size(&ede);
+ if (ctl_len < 0) {
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
- bw(buf, orig_len);
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
+ bw(buf, orig_len);
#endif
- PURIFY_MSG("data error");
- goto data_error;
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected result from erts_prepare_dist_ext()");
+ break;
}
if (ctl_len > DIST_CTL_DEFAULT_SIZE) {
@@ -1220,10 +1302,9 @@ int erts_net_message(Port *prt,
PURIFY_MSG("data error");
goto decode_error;
}
- ctl_len = t - buf;
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, "<<%s CTL: %T\n", len != orig_len ? "P" : " ", arg);
+ erts_fprintf(stderr, "<< CTL: %T\n", arg);
#endif
if (is_not_tuple(arg) ||
@@ -1233,90 +1314,92 @@ int erts_net_message(Port *prt,
}
token_size = 0;
+ token = NIL;
switch (type = unsigned_val(tuple[1])) {
- case DOP_LINK:
+ case DOP_LINK: {
+ ErtsDSigData dsd;
+ int code;
+
if (tuple_arity != 3) {
goto invalid_message;
}
from = tuple[2];
to = tuple[3]; /* local proc to link to */
- if (is_not_pid(from) || is_not_pid(to)) {
- goto invalid_message;
- }
+ if (is_not_external_pid(from))
+ goto invalid_message;
- rp = erts_pid2proc_opt(NULL, 0,
- to, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- /* This is tricky (we MUST force a distributed send) */
- ErtsDSigData dsd;
- int code;
- code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_exit(&dsd, to, from, am_noproc);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- break;
- }
+ if (dep != external_pid_dist_entry(from))
+ goto invalid_message;
- erts_smp_de_links_lock(dep);
- res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from);
+ if (is_external_pid(to)) {
+ if (external_pid_dist_entry(to) != erts_this_dist_entry)
+ goto invalid_message;
+ /* old incarnation of node; reply noproc... */
+ }
+ else if (is_internal_pid(to)) {
+ ErtsLinkData *ldp = erts_link_create(ERTS_LNK_TYPE_DIST_PROC,
+ from, to);
+ ASSERT(ldp->a.other.item == to);
+ ASSERT(eq(ldp->b.other.item, from));
+#ifdef DEBUG
+ code =
+#endif
+ erts_link_dist_insert(&ldp->a, dep->mld);
+ ASSERT(code);
- if (res < 0) {
- /* It was already there! Lets skip the rest... */
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- break;
- }
- lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id);
- erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from);
- erts_smp_de_links_unlock(dep);
+ if (erts_proc_sig_send_link(NULL, to, &ldp->b))
+ break; /* done */
+
+ /* Failed to send signal; cleanup and reply noproc... */
+
+#ifdef DEBUG
+ code =
+#endif
+ erts_link_dist_delete(&ldp->a);
+ ASSERT(code);
+ erts_link_release_both(ldp);
+ }
- if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, 0, rp, am_getting_linked, from);
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_exit(&dsd, to, from, am_noproc);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
+ }
case DOP_UNLINK: {
- ErtsDistLinkData dld;
if (tuple_arity != 3) {
goto invalid_message;
}
from = tuple[2];
to = tuple[3];
- if (is_not_pid(from) || is_not_pid(to)) {
+ if (is_not_external_pid(from))
+ goto invalid_message;
+ if (dep != external_pid_dist_entry(from))
goto invalid_message;
- }
-
- rp = erts_pid2proc_opt(NULL, 0,
- to, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp)
- break;
-
- lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
- if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
- trace_proc(NULL, 0, rp, am_getting_unlinked, from);
- }
+ if (is_external_pid(to)
+ && erts_this_dist_entry == external_pid_dist_entry(from))
+ break;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (is_not_internal_pid(to))
+ goto invalid_message;
- erts_remove_dist_link(&dld, to, from, dep);
- erts_destroy_dist_link(&dld);
- if (lnk)
- erts_destroy_link(lnk);
+ erts_proc_sig_send_dist_unlink(dep, from, to);
break;
}
case DOP_MONITOR_P: {
/* A remote process wants to monitor us; we get:
{DOP_MONITOR_P, Remote pid, local pid or name, ref} */
- Eterm name;
-
+ Eterm pid, name;
+ ErtsDSigData dsd;
+ int code;
+
if (tuple_arity != 4) {
goto invalid_message;
}
@@ -1325,44 +1408,58 @@ int erts_net_message(Port *prt,
watched = tuple[3]; /* local proc to monitor */
ref = tuple[4];
- if (is_not_ref(ref)) {
+ if (is_not_external_pid(watcher))
+ goto invalid_message;
+ else if (external_pid_dist_entry(watcher) != dep)
+ goto invalid_message;
+
+ if (is_not_ref(ref))
goto invalid_message;
- }
- if (is_atom(watched)) {
- name = watched;
- rp = erts_whereis_process(NULL, 0,
- watched, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- }
- else {
- name = NIL;
- rp = erts_pid2proc_opt(NULL, 0,
- watched, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- }
+ if (is_internal_pid(watched)) {
+ name = NIL;
+ pid = watched;
+ }
+ else if (is_atom(watched)) {
+ name = watched;
+ pid = erts_whereis_name_to_id(NULL, watched);
+ /* if port or undefined; reply noproc... */
+ }
+ else if (is_external_pid(watched)
+ && external_pid_dist_entry(watched) == erts_this_dist_entry) {
+ name = NIL;
+ pid = am_undefined; /* old incarnation; reply noproc... */
+ }
+ else
+ goto invalid_message;
- if (!rp) {
- ErtsDSigData dsd;
- int code;
- code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_m_exit(&dsd, watcher, watched, ref,
- am_noproc);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- }
- else {
- if (is_atom(watched))
- watched = rp->common.id;
- erts_smp_de_links_lock(dep);
- erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name);
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- }
+ if (is_internal_pid(pid)) {
+ ErtsMonitorData *mdp;
+ mdp = erts_monitor_create(ERTS_MON_TYPE_DIST_PROC,
+ ref, watcher, pid, name);
- break;
+ code = erts_monitor_dist_insert(&mdp->origin, dep->mld);
+ ASSERT(code); (void)code;
+
+ if (erts_proc_sig_send_monitor(&mdp->target, pid))
+ break; /* done */
+
+ /* Failed to send to local proc; cleanup and reply noproc... */
+
+ code = erts_monitor_dist_delete(&mdp->origin);
+ ASSERT(code); (void)code;
+ erts_monitor_release_both(mdp);
+
+ }
+
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_m_exit(&dsd, watcher, watched, ref,
+ am_noproc);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+
+ break;
}
case DOP_DEMONITOR_P:
@@ -1373,36 +1470,43 @@ int erts_net_message(Port *prt,
if (tuple_arity != 4) {
goto invalid_message;
}
- /* watcher = tuple[2]; */
- /* watched = tuple[3]; May be an atom in case of monitor name */
+
+ watcher = tuple[2];
+ watched = tuple[3];
ref = tuple[4];
- if(is_not_ref(ref)) {
+ if (is_not_ref(ref)) {
goto invalid_message;
}
- erts_smp_de_links_lock(dep);
- mon = erts_remove_monitor(&(dep->monitors),ref);
- erts_smp_de_links_unlock(dep);
- /* ASSERT(mon != NULL); can happen in case of broken dist message */
- if (mon == NULL) {
- break;
- }
- watched = mon->pid;
- erts_destroy_monitor(mon);
- rp = erts_pid2proc_opt(NULL, 0,
- watched, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- break;
- }
- mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- ASSERT(mon != NULL);
- if (mon == NULL) {
- break;
- }
- erts_destroy_monitor(mon);
+ if (is_not_external_pid(watcher) || external_pid_dist_entry(watcher) != dep)
+ goto invalid_message;
+
+ if (is_internal_pid(watched))
+ erts_proc_sig_send_dist_demonitor(watched, ref);
+ else if (is_external_pid(watched)
+ && external_pid_dist_entry(watched) == erts_this_dist_entry) {
+ /* old incarnation; ignore it */
+ ;
+ }
+ else if (is_atom(watched)) {
+ ErtsMonLnkDist *mld = dep->mld;
+ ErtsMonitor *mon;
+
+ erts_mtx_lock(&mld->mtx);
+
+ mon = erts_monitor_tree_lookup(mld->orig_name_monitors, ref);
+ if (mon)
+ erts_monitor_tree_delete(&mld->orig_name_monitors, mon);
+
+ erts_mtx_unlock(&mld->mtx);
+
+ if (mon)
+ erts_proc_sig_send_demonitor(mon);
+ }
+ else
+ goto invalid_message;
+
break;
case DOP_REG_SEND_TT:
@@ -1458,42 +1562,56 @@ int erts_net_message(Port *prt,
erts_queue_dist_message(rp, locks, ede_copy, token, from);
if (locks)
- erts_smp_proc_unlock(rp, locks);
+ erts_proc_unlock(rp, locks);
}
break;
+ case DOP_SEND_SENDER_TT: {
+ Uint xsize;
case DOP_SEND_TT:
+
if (tuple_arity != 4) {
goto invalid_message;
}
-
- token_size = size_object(tuple[4]);
- /* Fall through ... */
+
+ token = tuple[4];
+ token_size = size_object(token);
+ xsize = ERTS_HEAP_FRAG_SIZE(token_size);
+ goto send_common;
+
+ case DOP_SEND_SENDER:
case DOP_SEND:
+
+ token = NIL;
+ xsize = 0;
+ if (tuple_arity != 3)
+ goto invalid_message;
+
+ send_common:
+
/*
- * There is intentionally no testing of the cookie (it is always '')
- * from R9B and onwards.
+ * For DOP_SEND_SENDER and DOP_SEND_SENDER_TT, element 2 contains
+ * the sender pid (i.e. DFLAG_SEND_SENDER is set); otherwise it is
+ * the atom '' (empty cookie).
*/
+ ASSERT((type == DOP_SEND_SENDER || type == DOP_SEND_SENDER_TT)
+ ? (is_pid(tuple[2]) && (dep->flags & DFLAG_SEND_SENDER))
+ : tuple[2] == am_Empty);
+
#ifdef ERTS_DIST_MSG_DBG
dist_msg_dbg(&ede, "MSG", buf, orig_len);
#endif
- if (type != DOP_SEND_TT && tuple_arity != 3) {
- goto invalid_message;
- }
to = tuple[3];
if (is_not_pid(to)) {
goto invalid_message;
}
rp = erts_proc_lookup(to);
if (rp) {
- Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size);
ErtsProcLocks locks = 0;
ErtsDistExternal *ede_copy;
ede_copy = erts_make_dist_ext_copy(&ede, xsize);
- if (type == DOP_SEND) {
- token = NIL;
- } else {
+ if (is_not_nil(token)) {
ErlHeapFragment *heap_frag;
ErlOffHeap *ohp;
ASSERT(xsize);
@@ -1501,82 +1619,50 @@ int erts_net_message(Port *prt,
ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size);
hp = heap_frag->mem;
ohp = &heap_frag->off_heap;
- token = tuple[4];
token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]);
+ erts_queue_dist_message(rp, locks, ede_copy, token, am_Empty);
if (locks)
- erts_smp_proc_unlock(rp, locks);
+ erts_proc_unlock(rp, locks);
}
break;
+ }
case DOP_MONITOR_P_EXIT: {
/* We are monitoring a process on the remote node which has died; we get
{DOP_MONITOR_P_EXIT, Remote pid or name, Local pid, ref, reason} */
-
- DeclareTmpHeapNoproc(lhp,3);
- Eterm sysname;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_MSG_SEND|ERTS_PROC_LOCK_LINK;
-
if (tuple_arity != 5) {
goto invalid_message;
}
- /* watched = tuple[2]; */ /* remote proc which died */
- /* watcher = tuple[3]; */
+ watched = tuple[2]; /* remote proc or name which died */
+ watcher = tuple[3];
ref = tuple[4];
reason = tuple[5];
- if(is_not_ref(ref)) {
+ if (is_not_ref(ref))
goto invalid_message;
- }
- erts_smp_de_links_lock(dep);
- sysname = dep->sysname;
- mon = erts_remove_monitor(&(dep->monitors), ref);
- /*
- * If demonitor was performed at the same time as the
- * monitored process exits, monitoring side will have
- * removed info about monitor. In this case, do nothing
- * and everything will be as it should.
- */
- erts_smp_de_links_unlock(dep);
- if (mon == NULL) {
- break;
- }
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ if (is_not_external_pid(watched) && is_not_atom(watched))
+ goto invalid_message;
- erts_destroy_monitor(mon);
- if (rp == NULL) {
- break;
- }
-
- mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ if (is_not_internal_pid(watcher)) {
+ if (!is_external_pid(watcher))
+ goto invalid_message;
+ if (erts_this_dist_entry == external_pid_dist_entry(watcher))
+ break;
+ goto invalid_message;
+ }
- if (mon == NULL) {
- erts_smp_proc_unlock(rp, rp_locks);
- break;
- }
- UseTmpHeapNoproc(3);
-
- watched = (is_not_nil(mon->name)
- ? TUPLE2(&lhp[0], mon->name, sysname)
- : mon->pid);
-
- erts_queue_monitor_message(rp, &rp_locks,
- ref, am_process, watched, reason);
- erts_smp_proc_unlock(rp, rp_locks);
- erts_destroy_monitor(mon);
- UnUseTmpHeapNoproc(3);
+ erts_proc_sig_send_dist_monitor_down(dep, ref, watched,
+ watcher, reason);
break;
}
case DOP_EXIT_TT:
case DOP_EXIT: {
- ErtsDistLinkData dld;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
/* 'from', which 'to' is linked to, died */
if (type == DOP_EXIT) {
if (tuple_arity != 4) {
@@ -1596,56 +1682,19 @@ int erts_net_message(Port *prt,
token = tuple[4];
reason = tuple[5];
}
- if (is_not_pid(from) || is_not_internal_pid(to)) {
+ if (is_not_external_pid(from)
+ || dep != external_pid_dist_entry(from)
+ || is_not_internal_pid(to)) {
goto invalid_message;
}
- rp = erts_pid2proc(NULL, 0, to, rp_locks);
- if (!rp)
- lnk = NULL;
- else {
- lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
-
- /* If lnk == NULL, we have unlinked on this side, i.e.
- * ignore exit.
- */
- if (lnk) {
- int xres;
-#if 0
- /* Arndt: Maybe it should never be 'kill', but it can be,
- namely when a linked process does exit(kill). Until we know
- whether that is incorrect and what should happen instead,
- we leave the assertion out. */
- ASSERT(reason != am_kill); /* should never be kill (killed) */
-#endif
- xres = erts_send_exit_signal(NULL,
- from,
- rp,
- &rp_locks,
- reason,
- token,
- NULL,
- ERTS_XSIG_FLG_IGN_KILL);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- /* We didn't exit the process and it is traced */
- if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
- rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
- }
- trace_proc(NULL, 0, rp, am_getting_unlinked, from);
- }
- }
- erts_smp_proc_unlock(rp, rp_locks);
- }
- erts_remove_dist_link(&dld, to, from, dep);
- if (lnk)
- erts_destroy_link(lnk);
- erts_destroy_dist_link(&dld);
+ erts_proc_sig_send_dist_link_exit(dep,
+ from, to,
+ reason, token);
break;
}
case DOP_EXIT2_TT:
- case DOP_EXIT2: {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ case DOP_EXIT2:
/* 'from' sends an exit signal to 'to' */
if (type == DOP_EXIT2) {
if (tuple_arity != 4) {
@@ -1667,20 +1716,10 @@ int erts_net_message(Port *prt,
if (is_not_pid(from) || is_not_internal_pid(to)) {
goto invalid_message;
}
- rp = erts_pid2proc(NULL, 0, to, rp_locks);
- if (rp) {
- (void) erts_send_exit_signal(NULL,
- from,
- rp,
- &rp_locks,
- reason,
- token,
- NULL,
- 0);
- erts_smp_proc_unlock(rp, rp_locks);
- }
+
+ erts_proc_sig_send_exit(NULL, from, to, reason, token, 0);
break;
- }
+
case DOP_GROUP_LEADER:
if (tuple_arity != 3) {
goto invalid_message;
@@ -1691,11 +1730,7 @@ int erts_net_message(Port *prt,
goto invalid_message;
}
- rp = erts_pid2proc(NULL, 0, to, ERTS_PROC_LOCK_MAIN);
- if (!rp)
- break;
- rp->group_leader = STORE_NC_IN_PROC(rp, from);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ (void) erts_proc_sig_send_group_leader(NULL, to, from, NIL);
break;
default:
@@ -1707,7 +1742,7 @@ int erts_net_message(Port *prt,
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return 0;
invalid_message:
{
@@ -1723,8 +1758,8 @@ decode_error:
}
data_error:
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_deliver_port_exit(prt, dep->cid, am_killed, 0, 1);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ erts_kill_dist_connection(dep, conn_id);
+ ERTS_CHK_NO_PROC_LOCKS;
return -1;
}
@@ -1744,6 +1779,31 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy)
return ret;
}
+static ERTS_INLINE void
+notify_dist_data(Process *c_p, Eterm pid)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks;
+
+ ASSERT(erts_get_scheduler_data()
+ && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(is_internal_pid(pid));
+
+ if (c_p && c_p->common.id == pid) {
+ rp = c_p;
+ rp_locks = ERTS_PROC_LOCK_MAIN;
+ }
+ else {
+ rp = erts_proc_lookup(pid);
+ rp_locks = 0;
+ }
+
+ if (rp) {
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ erts_queue_message(rp, rp_locks, mp, am_dist_data, am_system);
+ }
+}
+
int
erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
{
@@ -1754,13 +1814,13 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
while (1) {
switch (ctx->phase) {
case ERTS_DSIG_SEND_PHASE_INIT:
- ctx->flags = dsdp->dep->flags;
+ ctx->flags = dsdp->flags;
ctx->c_p = dsdp->proc;
if (!ctx->c_p || dsdp->no_suspend)
ctx->force_busy = 1;
- ERTS_SMP_LC_ASSERT(!ctx->c_p
+ ERTS_LC_ASSERT(!ctx->c_p
|| (ERTS_PROC_LOCK_MAIN
== erts_proc_lc_my_proc_locks(ctx->c_p)));
@@ -1769,33 +1829,32 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
if (ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE) {
ctx->acmp = erts_get_atom_cache_map(ctx->c_p);
- ctx->pass_through_size = 0;
+ ctx->max_finalize_prepend = 0;
}
else {
ctx->acmp = NULL;
- ctx->pass_through_size = 1;
+ ctx->max_finalize_prepend = 3;
}
#ifdef ERTS_DIST_MSG_DBG
- erts_fprintf(stderr, ">>%s CTL: %T\n", ctx->pass_through_size ? "P" : " ", ctx->ctl);
- if (is_value(ctx->msg))
- erts_fprintf(stderr, " MSG: %T\n", ctx->msg);
+ erts_fprintf(stderr, ">> CTL: %T\n", ctx->ctl);
+ if (is_value(ctx->msg))
+ erts_fprintf(stderr, " MSG: %T\n", ctx->msg);
#endif
- ctx->data_size = ctx->pass_through_size;
+ ctx->data_size = ctx->max_finalize_prepend;
erts_reset_atom_cache_map(ctx->acmp);
erts_encode_dist_ext_size(ctx->ctl, ctx->flags, ctx->acmp, &ctx->data_size);
- if (is_value(ctx->msg)) {
- ctx->u.sc.wstack.wstart = NULL;
- ctx->u.sc.flags = ctx->flags;
- ctx->u.sc.level = 0;
- ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE;
- } else {
- ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
- }
- break;
+ if (is_non_value(ctx->msg)) {
+ ctx->phase = ERTS_DSIG_SEND_PHASE_ALLOC;
+ break;
+ }
+ ctx->u.sc.wstack.wstart = NULL;
+ ctx->u.sc.flags = ctx->flags;
+ ctx->u.sc.level = 0;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_SIZE;
case ERTS_DSIG_SEND_PHASE_MSG_SIZE:
if (erts_encode_dist_ext_size_int(ctx->msg, ctx, &ctx->data_size)) {
retval = ERTS_DSIG_SEND_CONTINUE;
@@ -1810,23 +1869,25 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
ctx->data_size += ctx->dhdr_ext_size;
ctx->obuf = alloc_dist_obuf(ctx->data_size);
- ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->pass_through_size + ctx->dhdr_ext_size;
+ ctx->obuf->ext_endp = &ctx->obuf->data[0] + ctx->max_finalize_prepend + ctx->dhdr_ext_size;
/* Encode internal version of dist header */
ctx->obuf->extp = erts_encode_ext_dist_header_setup(ctx->obuf->ext_endp, ctx->acmp);
/* Encode control message */
erts_encode_dist_ext(ctx->ctl, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, NULL, NULL);
- if (is_value(ctx->msg)) {
- ctx->u.ec.flags = ctx->flags;
- ctx->u.ec.level = 0;
- ctx->u.ec.wstack.wstart = NULL;
- ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE;
- } else {
- ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
- }
- break;
-
- case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
+ if (is_non_value(ctx->msg)) {
+ ctx->obuf->msg_start = NULL;
+ ctx->phase = ERTS_DSIG_SEND_PHASE_FIN;
+ break;
+ }
+ ctx->u.ec.flags = ctx->flags;
+ ctx->u.ec.hopefull_flags = 0;
+ ctx->u.ec.level = 0;
+ ctx->u.ec.wstack.wstart = NULL;
+ ctx->obuf->msg_start = ctx->obuf->ext_endp;
+
+ ctx->phase = ERTS_DSIG_SEND_PHASE_MSG_ENCODE;
+ case ERTS_DSIG_SEND_PHASE_MSG_ENCODE:
if (erts_encode_dist_ext(ctx->msg, &ctx->obuf->ext_endp, ctx->flags, ctx->acmp, &ctx->u.ec, &ctx->reds)) {
retval = ERTS_DSIG_SEND_CONTINUE;
goto done;
@@ -1839,38 +1900,59 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
int resume = 0;
ASSERT(ctx->obuf->extp < ctx->obuf->ext_endp);
- ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->pass_through_size);
+ ASSERT(&ctx->obuf->data[0] <= ctx->obuf->extp - ctx->max_finalize_prepend);
ASSERT(ctx->obuf->ext_endp <= &ctx->obuf->data[0] + ctx->data_size);
ctx->data_size = ctx->obuf->ext_endp - ctx->obuf->extp;
+ ctx->obuf->hopefull_flags = ctx->u.ec.hopefull_flags;
/*
* Signal encoded; now verify that the connection still exists,
* and if so enqueue the signal and schedule it for send.
*/
ctx->obuf->next = NULL;
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
cid = dep->cid;
- if (cid != dsdp->cid
- || dep->connection_id != dsdp->connection_id
- || dep->status & ERTS_DE_SFLG_EXITING) {
+ if (dep->state == ERTS_DE_STATE_EXITING
+ || dep->state == ERTS_DE_STATE_IDLE
+ || dep->connection_id != dsdp->connection_id) {
/* Not the same connection as when we started; drop message... */
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
free_dist_obuf(ctx->obuf);
}
else {
+ Sint qsize;
+ erts_aint32_t qflgs;
ErtsProcList *plp = NULL;
- erts_smp_mtx_lock(&dep->qlock);
- dep->qsize += size_obuf(ctx->obuf);
- if (dep->qsize >= erts_dist_buf_busy_limit)
- dep->qflgs |= ERTS_DE_QFLG_BUSY;
- if (!ctx->force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) {
- erts_smp_mtx_unlock(&dep->qlock);
+ Eterm notify_proc = NIL;
+ Sint obsz = size_obuf(ctx->obuf);
+
+ erts_mtx_lock(&dep->qlock);
+ qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) obsz);
+ ASSERT(qsize >= obsz);
+ qflgs = erts_atomic32_read_nob(&dep->qflgs);
+ if (!(qflgs & ERTS_DE_QFLG_BUSY) && qsize >= erts_dist_buf_busy_limit) {
+ erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_BUSY);
+ qflgs |= ERTS_DE_QFLG_BUSY;
+ }
+ if (qsize == obsz && (qflgs & ERTS_DE_QFLG_REQ_INFO)) {
+ /* Previously empty queue and info requested... */
+ qflgs = erts_atomic32_read_band_mb(&dep->qflgs,
+ ~ERTS_DE_QFLG_REQ_INFO);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO) {
+ notify_proc = dep->cid;
+ ASSERT(is_internal_pid(notify_proc));
+ }
+ /* else: requester will send itself the message... */
+ qflgs &= ~ERTS_DE_QFLG_REQ_INFO;
+ }
+ if (!ctx->force_busy && (qflgs & ERTS_DE_QFLG_BUSY)) {
+ erts_mtx_unlock(&dep->qlock);
plp = erts_proclist_create(ctx->c_p);
erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL);
suspended = 1;
- erts_smp_mtx_lock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
}
/* Enqueue obuf on dist entry */
@@ -1881,7 +1963,8 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
dep->out_queue.last = ctx->obuf;
if (!ctx->force_busy) {
- if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
+ qflgs = erts_atomic32_read_nob(&dep->qflgs);
+ if (!(qflgs & ERTS_DE_QFLG_BUSY)) {
if (suspended)
resume = 1; /* was busy when we started, but isn't now */
#ifdef USE_VM_PROBES
@@ -1905,9 +1988,17 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx)
}
}
- erts_smp_mtx_unlock(&dep->qlock);
- erts_schedule_dist_command(NULL, dep);
- erts_smp_de_runlock(dep);
+ erts_mtx_unlock(&dep->qlock);
+ if (dep->state != ERTS_DE_STATE_PENDING) {
+ if (is_internal_port(dep->cid))
+ erts_schedule_dist_command(NULL, dep);
+ }
+ else {
+ notify_proc = NIL;
+ }
+ erts_de_runlock(dep);
+ if (is_internal_pid(notify_proc))
+ notify_dist_data(ctx->c_p, notify_proc);
if (resume) {
erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN);
@@ -1961,35 +2052,39 @@ static Uint
dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
{
int fpe_was_unmasked;
- Uint size = obuf->ext_endp - obuf->extp;
+ ErlDrvSizeT size;
+ char *bufp;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (size > (Uint) INT_MAX)
- erts_exit(ERTS_DUMP_EXIT,
- "Absurdly large distribution output data buffer "
- "(%beu bytes) passed.\n",
- size);
+ if (!obuf) {
+ size = 0;
+ bufp = NULL;
+ }
+ else {
+ size = obuf->ext_endp - obuf->extp;
+ bufp = (char*) obuf->extp;
+ }
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_output)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
remote_str, size);
}
#endif
+
prt->caller = NIL;
fpe_was_unmasked = erts_block_fpe();
- (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data,
- (char*) obuf->extp,
- (int) size);
+ (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, bufp, size);
erts_unblock_fpe(fpe_was_unmasked);
return size;
}
@@ -1998,44 +2093,53 @@ static Uint
dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
{
int fpe_was_unmasked;
- Uint size = obuf->ext_endp - obuf->extp;
+ ErlDrvSizeT size;
SysIOVec iov[2];
ErlDrvBinary* bv[2];
ErlIOVec eiov;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (size > (Uint) INT_MAX)
- erts_exit(ERTS_DUMP_EXIT,
- "Absurdly large distribution output data buffer "
- "(%beu bytes) passed.\n",
- size);
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
iov[0].iov_base = NULL;
iov[0].iov_len = 0;
bv[0] = NULL;
- iov[1].iov_base = obuf->extp;
- iov[1].iov_len = size;
- bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+ if (!obuf) {
+ size = 0;
+ eiov.vsize = 1;
+ }
+ else {
+ size = obuf->ext_endp - obuf->extp;
+ eiov.vsize = 2;
+
+ iov[1].iov_base = obuf->extp;
+ iov[1].iov_len = size;
+ bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+ }
- eiov.vsize = 2;
eiov.size = size;
eiov.iov = iov;
eiov.binv = bv;
+ if (size > (Uint) INT_MAX)
+ erts_exit(ERTS_DUMP_EXIT,
+ "Absurdly large distribution output data buffer "
+ "(%beu bytes) passed.\n",
+ size);
+
ASSERT(prt->drv_ptr->outputv);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_outputv)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
remote_str, size);
}
@@ -2058,7 +2162,6 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
#endif
#define ERTS_PORT_REDS_DIST_CMD_START 5
-#define ERTS_PORT_REDS_DIST_CMD_FINALIZE 3
#define ERTS_PORT_REDS_DIST_CMD_EXIT 200
#define ERTS_PORT_REDS_DIST_CMD_RESUMED 5
#define ERTS_PORT_REDS_DIST_CMD_DATA(SZ) \
@@ -2067,37 +2170,36 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
: ((((Sint) (SZ)) >> 10) & ((Sint) ERTS_PORT_REDS_MASK__)))
int
-erts_dist_command(Port *prt, int reds_limit)
+erts_dist_command(Port *prt, int initial_reds)
{
- Sint reds = ERTS_PORT_REDS_DIST_CMD_START;
- Uint32 status;
+ Sint reds = initial_reds - ERTS_PORT_REDS_DIST_CMD_START;
+ enum dist_entry_state state;
Uint32 flags;
- Sint obufsize = 0;
+ Sint qsize, obufsize = 0;
ErtsDistOutputQueue oq, foq;
- DistEntry *dep = prt->dist_entry;
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
erts_aint32_t sched_flags;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
- removed if port command fails */
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
+ erts_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
flags = dep->flags;
- status = dep->status;
+ state = dep->state;
send = dep->send;
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
- if (status & ERTS_DE_SFLG_EXITING) {
+ if (state == ERTS_DE_STATE_EXITING) {
erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
- erts_deref_dist_entry(dep);
- return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
+ reds -= ERTS_PORT_REDS_DIST_CMD_EXIT;
+ return initial_reds - reds;
}
+ ASSERT(state != ERTS_DE_STATE_PENDING);
+
ASSERT(send);
/*
@@ -2108,42 +2210,42 @@ erts_dist_command(Port *prt, int reds_limit)
* a mess.
*/
- erts_smp_mtx_lock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
oq.first = dep->out_queue.first;
oq.last = dep->out_queue.last;
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
foq.first = dep->finalized_out_queue.first;
foq.last = dep->finalized_out_queue.last;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
- if (reds > reds_limit)
+ if (reds < 0)
goto preempted;
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) {
int preempt = 0;
do {
- Uint size;
- ErtsDistOutputBuf *fob;
-
- size = (*send)(prt, foq.first);
- esdp->io.out += (Uint64) size;
+ Uint size;
+ ErtsDistOutputBuf *fob;
+ size = (*send)(prt, foq.first);
+ erts_atomic64_inc_nob(&dep->out);
+ esdp->io.out += (Uint64) size;
#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(foq.first->extp, size);
+ erts_fprintf(stderr, ">> ");
+ bw(foq.first->extp, size);
#endif
- reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size);
fob = foq.first;
obufsize += size_obuf(fob);
foq.first = foq.first->next;
free_dist_obuf(fob);
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds < 0 || (sched_flags & ERTS_PTS_FLG_EXIT);
if (sched_flags & ERTS_PTS_FLG_BUSY_PORT)
break;
} while (foq.first && !preempt);
@@ -2156,79 +2258,71 @@ erts_dist_command(Port *prt, int reds_limit)
if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) {
if (oq.first) {
ErtsDistOutputBuf *ob;
- int preempt;
+ ErtsDistOutputBuf *last_finalized = NULL;
finalize_only:
- preempt = 0;
ob = oq.first;
ASSERT(ob);
do {
- ob->extp = erts_encode_ext_dist_header_finalize(ob->extp,
- dep->cache,
- flags);
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
- *--ob->extp = PASS_THROUGH; /* Old node; 'pass through'
- needed */
- ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
- reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
- preempt = reds > reds_limit;
- if (preempt)
- break;
- ob = ob->next;
+ reds = erts_encode_ext_dist_header_finalize(ob, dep, flags, reds);
+ if (reds < 0)
+ break;
+ last_finalized = ob;
+ ob = ob->next;
} while (ob);
- /*
- * At least one buffer was finalized; if we got preempted,
- * ob points to the last buffer that we finalized.
- */
- if (foq.last)
- foq.last->next = oq.first;
- else
- foq.first = oq.first;
- if (!preempt) {
- /* All buffers finalized */
- foq.last = oq.last;
- oq.first = oq.last = NULL;
- }
- else {
- /* Not all buffers finalized; split oq. */
- foq.last = ob;
- oq.first = ob->next;
- if (oq.first)
- ob->next = NULL;
- else
- oq.last = NULL;
- }
- if (preempt)
- goto preempted;
+ if (last_finalized) {
+ /*
+ * At least one buffer was finalized; if we got preempted,
+ * ob points to the next buffer still to be finalized.
+ */
+ if (foq.last)
+ foq.last->next = oq.first;
+ else
+ foq.first = oq.first;
+ foq.last = last_finalized;
+ if (!ob) {
+ /* All buffers finalized */
+ ASSERT(foq.last == oq.last);
+ ASSERT(foq.last->next == NULL);
+ oq.first = oq.last = NULL;
+ }
+ else {
+ /* Not all buffers finalized; split oq. */
+ ASSERT(foq.last->next == ob);
+ foq.last->next = NULL;
+ oq.first = ob;
+ }
+ }
+ if (reds <= 0)
+ goto preempted;
}
}
else {
+ int de_busy;
int preempt = 0;
while (oq.first && !preempt) {
ErtsDistOutputBuf *fob;
Uint size;
- oq.first->extp
- = erts_encode_ext_dist_header_finalize(oq.first->extp,
- dep->cache,
- flags);
- reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
- *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through'
- needed */
+ reds = erts_encode_ext_dist_header_finalize(oq.first, dep, flags, reds);
+ if (reds < 0) {
+ preempt = 1;
+ break;
+ }
ASSERT(&oq.first->data[0] <= oq.first->extp
- && oq.first->extp < oq.first->ext_endp);
+ && oq.first->extp <= oq.first->ext_endp);
size = (*send)(prt, oq.first);
+ erts_atomic64_inc_nob(&dep->out);
esdp->io.out += (Uint64) size;
#ifdef ERTS_RAW_DIST_MSG_DBG
- erts_fprintf(stderr, ">> ");
- bw(oq.first->extp, size);
+ erts_fprintf(stderr, ">> ");
+ bw(oq.first->extp, size);
#endif
- reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ reds -= ERTS_PORT_REDS_DIST_CMD_DATA(size);
fob = oq.first;
obufsize += size_obuf(fob);
oq.first = oq.first->next;
free_dist_obuf(fob);
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds <= 0 || (sched_flags & ERTS_PTS_FLG_EXIT);
if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt)
goto finalize_only;
}
@@ -2254,23 +2348,24 @@ erts_dist_command(Port *prt, int reds_limit)
* dist entry in a non-busy state and resume suspended
* processes.
*/
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
+ erts_mtx_lock(&dep->qlock);
+ de_busy = !!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_BUSY);
+ qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ ASSERT(qsize >= 0);
obufsize = 0;
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)
- && (dep->qflgs & ERTS_DE_QFLG_BUSY)
- && dep->qsize < erts_dist_buf_busy_limit) {
+ && de_busy && qsize < erts_dist_buf_busy_limit) {
ErtsProcList *suspendees;
int resumed;
suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY);
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
resumed = erts_resume_processes(suspendees);
- reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
+ reds -= resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
}
else
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
}
ASSERT(!oq.first && !oq.last);
@@ -2279,14 +2374,18 @@ erts_dist_command(Port *prt, int reds_limit)
if (obufsize != 0) {
ASSERT(obufsize > 0);
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize >= obufsize);
- dep->qsize -= obufsize;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_lock(&dep->qlock);
+#ifdef DEBUG
+ qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize,
+ (erts_aint_t) -obufsize);
+ ASSERT(qsize >= 0);
+#else
+ erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize);
+#endif
+ erts_mtx_unlock(&dep->qlock);
}
- ASSERT(foq.first || !foq.last);
- ASSERT(!foq.first || foq.last);
+ ASSERT(!!foq.first == !!foq.last);
ASSERT(!dep->finalized_out_queue.first);
ASSERT(!dep->finalized_out_queue.last);
@@ -2296,12 +2395,10 @@ erts_dist_command(Port *prt, int reds_limit)
}
/* Avoid wrapping reduction counter... */
- if (reds > INT_MAX/2)
- reds = INT_MAX/2;
-
- erts_deref_dist_entry(dep);
+ if (reds < INT_MIN/2)
+ reds = INT_MIN/2;
- return reds;
+ return initial_reds - reds;
preempted:
/*
@@ -2309,8 +2406,7 @@ erts_dist_command(Port *prt, int reds_limit)
* since last call to driver.
*/
- ASSERT(oq.first || !oq.last);
- ASSERT(!oq.first || oq.last);
+ ASSERT(!!oq.first == !!oq.last);
if (sched_flags & ERTS_PTS_FLG_EXIT) {
/*
@@ -2334,12 +2430,6 @@ erts_dist_command(Port *prt, int reds_limit)
foq.first = NULL;
foq.last = NULL;
-
-#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize == obufsize);
- erts_smp_mtx_unlock(&dep->qlock);
-#endif
}
else {
if (oq.first) {
@@ -2347,14 +2437,14 @@ erts_dist_command(Port *prt, int reds_limit)
* Unhandled buffers need to be put back first
* in out_queue.
*/
- erts_smp_mtx_lock(&dep->qlock);
- dep->qsize -= obufsize;
+ erts_mtx_lock(&dep->qlock);
+ erts_atomic_add_nob(&dep->qsize, -obufsize);
obufsize = 0;
oq.last->next = dep->out_queue.first;
dep->out_queue.first = oq.first;
if (!dep->out_queue.last)
dep->out_queue.last = oq.last;
- erts_smp_mtx_unlock(&dep->qlock);
+ erts_mtx_unlock(&dep->qlock);
}
erts_schedule_dist_command(prt, NULL);
@@ -2362,18 +2452,406 @@ erts_dist_command(Port *prt, int reds_limit)
goto done;
}
+#if 0
+
+int
+dist_data_finalize(Process *c_p, int reds_limit)
+{
+ int reds = 5;
+ DistEntry *dep = ;
+ ErtsDistOutputQueue oq, foq;
+ ErtsDistOutputBuf *ob;
+ int preempt;
+
+
+ erts_mtx_lock(&dep->qlock);
+ flags = dep->flags;
+ oq.first = dep->out_queue.first;
+ oq.last = dep->out_queue.last;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ erts_mtx_unlock(&dep->qlock);
+
+ if (!oq.first) {
+ ASSERT(!oq.last);
+ oq.first = dep->tmp_out_queue.first;
+ oq.last = dep->tmp_out_queue.last;
+ }
+ else {
+ ErtsDistOutputBuf *f, *l;
+ ASSERT(oq.last);
+ if (dep->tmp_out_queue.last) {
+ dep->tmp_out_queue.last->next = oq.first;
+ oq.first = dep->tmp_out_queue.first;
+ }
+ }
+
+ if (!oq.first) {
+ /* Nothing to do... */
+ ASSERT(!oq.last);
+ return reds;
+ }
+
+ foq.first = dep->finalized_out_queue.first;
+ foq.last = dep->finalized_out_queue.last;
+
+ preempt = 0;
+ ob = oq.first;
+ ASSERT(ob);
+
+ do {
+ ob->extp = erts_encode_ext_dist_header_finalize(ob->extp,
+ dep->cache,
+ flags);
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--ob->extp = PASS_THROUGH; /* Old node; 'pass through'
+ needed */
+ ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ preempt = reds > reds_limit;
+ if (preempt)
+ break;
+ ob = ob->next;
+ } while (ob);
+ /*
+ * At least one buffer was finalized; if we got preempted,
+ * ob points to the last buffer that we finalized.
+ */
+ if (foq.last)
+ foq.last->next = oq.first;
+ else
+ foq.first = oq.first;
+ if (!preempt) {
+ /* All buffers finalized */
+ foq.last = oq.last;
+ oq.first = oq.last = NULL;
+ }
+ else {
+ /* Not all buffers finalized; split oq. */
+ foq.last = ob;
+ oq.first = ob->next;
+ if (oq.first)
+ ob->next = NULL;
+ else
+ oq.last = NULL;
+ }
+
+ dep->finalized_out_queue.first = foq.first;
+ dep->finalized_out_queue.last = foq.last;
+ dep->tmp_out_queue.first = oq.first;
+ dep->tmp_out_queue.last = oq.last;
+
+ return reds;
+}
+
+#endif
+
+BIF_RETTYPE
+dist_ctrl_get_data_notification_1(BIF_ALIST_1)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ erts_aint32_t qflgs;
+ erts_aint_t qsize;
+ Eterm receiver = NIL;
+ Uint32 conn_id;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ /*
+ * Caller is the only one that can consume from this queue
+ * and the only one that can set the req-info flag...
+ */
+
+ erts_de_rlock(dep);
+
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(dep->cid == BIF_P->common.id);
+
+ qflgs = erts_atomic32_read_acqb(&dep->qflgs);
+
+ if (!(qflgs & ERTS_DE_QFLG_REQ_INFO)) {
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ ASSERT(qsize >= 0);
+ if (qsize > 0)
+ receiver = BIF_P->common.id; /* Notify ourselves... */
+ else { /* Empty queue; set req-info flag... */
+ qflgs = erts_atomic32_read_bor_mb(&dep->qflgs,
+ ERTS_DE_QFLG_REQ_INFO);
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ ASSERT(qsize >= 0);
+ if (qsize > 0) {
+ qflgs = erts_atomic32_read_band_mb(&dep->qflgs,
+ ~ERTS_DE_QFLG_REQ_INFO);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO)
+ receiver = BIF_P->common.id; /* Notify ourselves... */
+ /* else: someone else will notify us... */
+ }
+ /* else: still empty queue... */
+ }
+ }
+ /* else: Already requested... */
+
+ erts_de_runlock(dep);
+
+ if (is_internal_pid(receiver))
+ notify_dist_data(BIF_P, receiver);
+
+ BIF_RET(am_ok);
+}
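
The set-then-recheck dance above pairs with the enqueue path in erts_dsig_send(); a
condensed sketch of the race it resolves, with the barrier variants as they appear in
the code:

    /* Requester (this BIF)             Enqueuer (erts_dsig_send)
     *   qflgs |= REQ_INFO       (mb)     qsize += obsz
     *   re-read qsize           (acqb)   if (queue was empty && REQ_INFO):
     *   if (qsize > 0):                      qflgs &= ~REQ_INFO  (mb)
     *       qflgs &= ~REQ_INFO  (mb)         if flag was still set:
     *       if flag was still set:               notify controller
     *           notify self
     * Whichever side actually clears REQ_INFO sends the 'dist_data'
     * message, so exactly one notification is delivered per request. */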
+
+BIF_RETTYPE
+dist_ctrl_put_data_2(BIF_ALIST_2)
+{
+ DistEntry *dep;
+ ErlDrvSizeT size;
+ Eterm input_handler;
+ Uint32 conn_id;
+
+ if (is_binary(BIF_ARG_2))
+ size = binary_size(BIF_ARG_2);
+ else if (is_nil(BIF_ARG_2))
+ size = 0;
+ else if (is_list(BIF_ARG_2))
+ BIF_TRAP2(dist_ctrl_put_data_trap,
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ else
+ BIF_ERROR(BIF_P, BADARG);
+
+ dep = erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id);
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ input_handler = (Eterm) erts_atomic_read_nob(&dep->input_handler);
+
+ if (input_handler != BIF_P->common.id)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ erts_atomic64_inc_nob(&dep->in);
+
+ if (size != 0) {
+ byte *data, *temp_alloc = NULL;
+
+ data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc);
+ if (!data)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ (void) erts_net_message(NULL, dep, conn_id, NULL, 0, data, size);
+ /*
+ * We ignore any decode failures. On fatal failures the
+ * connection will be taken down by killing the
+ * distribution channel controller...
+ */
+
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BUMP_REDS(BIF_P, 5);
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+
+ }
+
+ BIF_RET(am_ok);
+}
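
Note the input dispatch at the top of the BIF: binaries are decoded in place, while
iolists trap to erts_internal:dist_ctrl_put_data/2 (the export installed in init_dist()
above), which presumably flattens the iolist to a binary and re-enters this BIF. A
sketch of the dispatch:

    /* is_binary(Data) -> decode directly via erts_net_message()        */
    /* is_nil(Data)    -> size 0; counted in dep->in, nothing decoded   */
    /* is_list(Data)   -> BIF_TRAP2(dist_ctrl_put_data_trap, ...)       */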
+
+BIF_RETTYPE
+dist_get_stat_1(BIF_ALIST_1)
+{
+ Sint64 read, write, pend;
+ Eterm res, *hp, **hpp;
+ Uint sz, *szp;
+ Uint32 conn_id;
+ DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id);
+
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ read = (Sint64) erts_atomic64_read_nob(&dep->in);
+ write = (Sint64) erts_atomic64_read_nob(&dep->out);
+ pend = (Sint64) erts_atomic_read_nob(&dep->qsize);
+
+ erts_de_runlock(dep);
+
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+
+ while (1) {
+ res = erts_bld_tuple(hpp, szp, 4,
+ am_ok,
+ erts_bld_sint64(hpp, szp, read),
+ erts_bld_sint64(hpp, szp, write),
+ pend ? am_true : am_false);
+ if (hpp)
+ break;
+ hp = HAlloc(BIF_P, sz);
+ hpp = &hp;
+ szp = NULL;
+ }
+
+ BIF_RET(res);
+}
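
The while (1) loop above is the usual two-pass erts_bld_* idiom: the first pass runs
with hpp == NULL and only accumulates the needed heap size in sz; after HAlloc, the
second pass runs with szp == NULL and builds the term for real. A condensed sketch:

    /* pass 1: szp = &sz, hpp = NULL  -> res is unused, sz = words needed */
    /* hp = HAlloc(BIF_P, sz)         -> allocate exactly sz words        */
    /* pass 2: hpp = &hp, szp = NULL  -> res = the built 4-tuple          */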
+
+BIF_RETTYPE
+dist_ctrl_input_handler_2(BIF_ALIST_2)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ Uint32 conn_id;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_not_internal_pid(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic_set_nob(&dep->input_handler,
+ (erts_aint_t) BIF_ARG_2);
+ erts_de_runlock(dep);
+ BIF_RET(am_ok);
+}
+
+BIF_RETTYPE
+dist_ctrl_get_data_1(BIF_ALIST_1)
+{
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P);
+ const Sint initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ Sint reds = initial_reds;
+ ErtsDistOutputBuf *obuf;
+ Eterm *hp;
+ ProcBin *pb;
+ erts_aint_t qsize;
+ Uint32 conn_id;
+
+ if (!dep)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (erts_dhandle_to_dist_entry(BIF_ARG_1, &conn_id) != dep)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_de_rlock(dep);
+
+ if (dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (dep->state == ERTS_DE_STATE_EXITING)
+ goto return_none;
+
+ ASSERT(dep->cid == BIF_P->common.id);
+
+#if 0
+ if (dep->finalized_out_queue.first) {
+ obuf = dep->finalized_out_queue.first;
+ dep->finalized_out_queue.first = obuf->next;
+ if (!obuf->next)
+ dep->finalized_out_queue.last = NULL;
+ }
+ else
+#endif
+ {
+ if (!dep->tmp_out_queue.first) {
+ ASSERT(!dep->tmp_out_queue.last);
+ ASSERT(!dep->transcode_ctx);
+ qsize = erts_atomic_read_acqb(&dep->qsize);
+ if (qsize > 0) {
+ erts_mtx_lock(&dep->qlock);
+ dep->tmp_out_queue.first = dep->out_queue.first;
+ dep->tmp_out_queue.last = dep->out_queue.last;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ erts_mtx_unlock(&dep->qlock);
+ }
+ }
+
+ if (!dep->tmp_out_queue.first) {
+ ASSERT(!dep->tmp_out_queue.last);
+ return_none:
+ erts_de_runlock(dep);
+ BIF_RET(am_none);
+ }
+
+ obuf = dep->tmp_out_queue.first;
+ reds = erts_encode_ext_dist_header_finalize(obuf, dep, dep->flags, reds);
+ if (reds < 0) {
+ erts_de_runlock(dep);
+ ERTS_BIF_YIELD1(bif_export[BIF_dist_ctrl_get_data_1],
+ BIF_P, BIF_ARG_1);
+ }
+
+ dep->tmp_out_queue.first = obuf->next;
+ if (!obuf->next)
+ dep->tmp_out_queue.last = NULL;
+ }
+
+ erts_atomic64_inc_nob(&dep->out);
+
+ erts_de_runlock(dep);
+
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ pb = (ProcBin *) (char *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = obuf->ext_endp - obuf->extp;
+ pb->next = MSO(BIF_P).first;
+ MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
+ pb->val = ErtsDistOutputBuf2Binary(obuf);
+ pb->bytes = (byte*) obuf->extp;
+ pb->flags = 0;
+
+ qsize = erts_atomic_add_read_nob(&dep->qsize, -size_obuf(obuf));
+ ASSERT(qsize >= 0);
+
+ if (qsize < erts_dist_buf_busy_limit/2
+ && (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY)) {
+ ErtsProcList *resume_procs = NULL;
+ erts_mtx_lock(&dep->qlock);
+ resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY);
+ erts_mtx_unlock(&dep->qlock);
+ if (resume_procs) {
+ int resumed = erts_resume_processes(resume_procs);
+ reds -= resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
+ }
+ }
+
+ BIF_RET2(make_binary(pb), (initial_reds - reds));
+}
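
The reduction bookkeeping in dist_ctrl_get_data_1 follows a common BIF pattern: grab the remaining budget up front, let the expensive step (here the distribution-header finalization) drain it, yield with unchanged arguments if it goes negative, and otherwise report the consumed amount through BIF_RET2. A sketch under those assumptions (my_bif_1 and do_bounded_work are hypothetical):

    BIF_RETTYPE my_bif_1(BIF_ALIST_1)
    {
        const Sint initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
        Sint reds = initial_reds;

        reds = do_bounded_work(BIF_ARG_1, reds); /* may return < 0 */
        if (reds < 0) {
            /* Budget exhausted: reschedule this BIF with the same argument. */
            ERTS_BIF_YIELD1(bif_export[BIF_my_bif_1], BIF_P, BIF_ARG_1);
        }
        /* Done: return the result and charge what was actually used. */
        BIF_RET2(am_ok, initial_reds - reds);
    }
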
+
void
erts_dist_port_not_busy(Port *prt)
{
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(dist_port_not_busy)) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
"%T", prt->common.id);
erts_snprintf(remote_str, sizeof(DTRACE_CHARBUF_NAME(remote_str)),
- "%T", prt->dist_entry->sysname);
+ "%T", dep->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
port_str, remote_str);
}
@@ -2381,86 +2859,93 @@ erts_dist_port_not_busy(Port *prt)
erts_schedule_dist_command(prt, NULL);
}
-void
-erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id)
+static void kill_connection(DistEntry *dep)
{
- erts_smp_de_rwlock(dep);
- if (is_internal_port(dep->cid)
- && connection_id == dep->connection_id
- && !(dep->status & ERTS_DE_SFLG_EXITING)) {
-
- dep->status |= ERTS_DE_SFLG_EXITING;
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ ASSERT(dep->state == ERTS_DE_STATE_CONNECTED);
+
+ dep->state = ERTS_DE_STATE_EXITING;
+ erts_mtx_lock(&dep->qlock);
+ ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT));
+ erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT);
+ erts_mtx_unlock(&dep->qlock);
+
+ if (is_internal_port(dep->cid))
+ erts_schedule_dist_command(NULL, dep);
+ else if (is_internal_pid(dep->cid))
+ schedule_kill_dist_ctrl_proc(dep->cid);
+}
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
- dep->qflgs |= ERTS_DE_QFLG_EXIT;
- erts_smp_mtx_unlock(&dep->qlock);
+void
+erts_kill_dist_connection(DistEntry *dep, Uint32 conn_id)
+{
+ erts_de_rwlock(dep);
+ if (conn_id == dep->connection_id
+ && dep->state == ERTS_DE_STATE_CONNECTED) {
- erts_schedule_dist_command(NULL, dep);
+ kill_connection(dep);
}
- erts_smp_de_rwunlock(dep);
+ erts_de_rwunlock(dep);
}
struct print_to_data {
- int to;
+ fmtfn_t to;
void *arg;
};
static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
{
- int to = ((struct print_to_data *) vptdp)->to;
+ fmtfn_t to = ((struct print_to_data *) vptdp)->to;
void *arg = ((struct print_to_data *) vptdp)->arg;
- Process *rp;
- ErtsMonitor *rmon;
- rp = erts_proc_lookup(mon->pid);
- if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
- erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
- } else if (mon->type == MON_ORIGIN) {
- /* Local pid is being monitored */
+ ErtsMonitorDataExtended *mdep;
+
+ ASSERT(mon->flags & ERTS_ML_FLG_EXTENDED);
+
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+
+ ASSERT(mdep->dist);
+
+ if (erts_monitor_is_origin(mon)) {
erts_print(to, arg, "Remotely monitored by: %T %T\n",
- mon->pid, rmon->pid);
- } else {
- erts_print(to, arg, "Remote monitoring: %T ", mon->pid);
- if (is_not_atom(rmon->pid))
- erts_print(to, arg, "%T\n", rmon->pid);
- else
- erts_print(to, arg, "{%T, %T}\n",
- rmon->name,
- rmon->pid); /* which in this case is the
- remote system name... */
+ mon->other.item, mdep->md.target.other.item);
+ }
+ else {
+ erts_print(to, arg, "Remote monitoring: %T ", mon->other.item);
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ erts_print(to, arg, "{%T, %T}\n", mdep->u.name, mdep->dist->nodename);
+ else
+ erts_print(to, arg, "%T\n", mdep->md.origin.other.item);
}
}
-static void print_monitor_info(int to, void *arg, ErtsMonitor *mon)
+static void print_monitor_info(fmtfn_t to, void *arg, DistEntry *dep)
{
struct print_to_data ptd = {to, arg};
- erts_doforall_monitors(mon,&doit_print_monitor_info,&ptd);
-}
-
-typedef struct {
- struct print_to_data *ptdp;
- Eterm from;
-} PrintLinkContext;
-
-static void doit_print_link_info2(ErtsLink *lnk, void *vpplc)
-{
- PrintLinkContext *pplc = (PrintLinkContext *) vpplc;
- erts_print(pplc->ptdp->to, pplc->ptdp->arg, "Remote link: %T %T\n",
- pplc->from, lnk->pid);
+ if (dep->mld) {
+ erts_monitor_list_foreach(dep->mld->monitors,
+ doit_print_monitor_info,
+ (void *) &ptd);
+ erts_monitor_tree_foreach(dep->mld->orig_name_monitors,
+ doit_print_monitor_info,
+ (void *) &ptd);
+ }
}
static void doit_print_link_info(ErtsLink *lnk, void *vptdp)
{
- if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid)) {
- PrintLinkContext plc = {(struct print_to_data *) vptdp, lnk->pid};
- erts_doforall_links(ERTS_LINK_ROOT(lnk), &doit_print_link_info2, &plc);
- }
+ struct print_to_data *ptdp = vptdp;
+ ErtsLink *lnk2 = erts_link_to_other(lnk, NULL);
+ erts_print(ptdp->to, ptdp->arg, "Remote link: %T %T\n",
+ lnk2->other.item, lnk->other.item);
}
-static void print_link_info(int to, void *arg, ErtsLink *lnk)
+static void print_link_info(fmtfn_t to, void *arg, DistEntry *dep)
{
struct print_to_data ptd = {to, arg};
- erts_doforall_links(lnk, &doit_print_link_info, (void *) &ptd);
+ if (dep->mld)
+ erts_link_list_foreach(dep->mld->links,
+ doit_print_link_info,
+ (void *) &ptd);
}
typedef struct {
@@ -2468,25 +2953,8 @@ typedef struct {
Eterm sysname;
} PrintNodeLinkContext;
-
-static void doit_print_nodelink_info(ErtsLink *lnk, void *vpcontext)
-{
- PrintNodeLinkContext *pcontext = vpcontext;
-
- if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid))
- erts_print(pcontext->ptd.to, pcontext->ptd.arg,
- "Remote monitoring: %T %T\n", lnk->pid, pcontext->sysname);
-}
-
-static void print_nodelink_info(int to, void *arg, ErtsLink *lnk, Eterm sysname)
-{
- PrintNodeLinkContext context = {{to, arg}, sysname};
- erts_doforall_links(lnk, &doit_print_nodelink_info, &context);
-}
-
-
static int
-info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
+info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connected)
{
if (visible && connected) {
@@ -2513,13 +2981,10 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
}
erts_print(to, arg, "Name: %T", dep->sysname);
-#ifdef DEBUG
- erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 0));
-#endif
erts_print(to, arg, "\n");
if (!connected && is_nil(dep->cid)) {
- if (dep->nlinks) {
- erts_print(to, arg, "Error: Got links to not connected node:%T\n",
+ if (dep->mld) {
+ erts_print(to, arg, "Error: Got links/monitors to not connected node:%T\n",
dep->sysname);
}
return 0;
@@ -2528,14 +2993,13 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
erts_print(to, arg, "Controller: %T\n", dep->cid, to);
erts_print_node_info(to, arg, dep->sysname, NULL, NULL);
- print_monitor_info(to, arg, dep->monitors);
- print_link_info(to, arg, dep->nlinks);
- print_nodelink_info(to, arg, dep->node_links, dep->sysname);
+ print_monitor_info(to, arg, dep);
+ print_link_info(to, arg, dep);
return 0;
}
-int distribution_info(int to, void *arg) /* Called by break handler */
+int distribution_info(fmtfn_t to, void *arg) /* Called by break handler */
{
DistEntry *dep;
@@ -2559,6 +3023,10 @@ int distribution_info(int to, void *arg) /* Called by break handler */
info_dist_entry(to, arg, dep, 0, 1);
}
+ for (dep = erts_pending_dist_entries; dep; dep = dep->next) {
+ info_dist_entry(to, arg, dep, 0, 0);
+ }
+
for (dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
if (dep != erts_this_dist_entry) {
info_dist_entry(to, arg, dep, 0, 0);
@@ -2581,7 +3049,6 @@ int distribution_info(int to, void *arg) /* Called by break handler */
monitor_node -- turn on/off node monitoring
node controller only:
- dist_exit/3 -- send exit signals from remote to local process
dist_link/2 -- link a remote process to a local
dist_unlink/2 -- unlink a remote from a local
****************************************************************************/
@@ -2591,15 +3058,6 @@ int distribution_info(int to, void *arg) /* Called by break handler */
/**********************************************************************
** Set the node name of current node fail if node already is set.
** setnode(name@host, Creation)
- ** loads functions pointer to trap_functions from module erlang.
- ** erlang:dsend/2
- ** erlang:dlink/1
- ** erlang:dunlink/1
- ** erlang:dmonitor_node/3
- ** erlang:dgroup_leader/2
- ** erlang:dexit/2
- ** -- are these needed ?
- ** dexit/1
***********************************************************************/
BIF_RETTYPE setnode_2(BIF_ALIST_2)
@@ -2623,44 +3081,50 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
goto error;
/* Check that all trap functions are defined !! */
- if (dsend2_trap->addressv[0] == NULL ||
- dsend3_trap->addressv[0] == NULL ||
- /* dsend_nosuspend_trap->address == NULL ||*/
- dlink_trap->addressv[0] == NULL ||
- dunlink_trap->addressv[0] == NULL ||
- dmonitor_node_trap->addressv[0] == NULL ||
- dgroup_leader_trap->addressv[0] == NULL ||
- dmonitor_p_trap->addressv[0] == NULL ||
- dexit_trap->addressv[0] == NULL) {
+ if (dmonitor_node_trap->addressv[0] == NULL) {
goto error;
}
- net_kernel = erts_whereis_process(BIF_P, ERTS_PROC_LOCK_MAIN,
- am_net_kernel, ERTS_PROC_LOCK_MAIN, 0);
- if (!net_kernel)
+ net_kernel = erts_whereis_process(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ am_net_kernel,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ 0);
+ if (!net_kernel || ERTS_PROC_GET_DIST_ENTRY(net_kernel))
goto error;
/* By setting F_DISTRIBUTION on net_kernel,
- * do_net_exist will be called when net_kernel is terminated !! */
+ * erts_do_net_exits will be called when net_kernel is terminated !! */
net_kernel->flags |= F_DISTRIBUTION;
- if (net_kernel != BIF_P)
- erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(net_kernel,
+ (ERTS_PROC_LOCK_STATUS
+ | ((net_kernel != BIF_P)
+ ? ERTS_PROC_LOCK_MAIN
+ : 0)));
#ifdef DEBUG
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries);
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
#endif
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
inc_no_nodes();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ /*
+ * Note erts_this_dist_entry is changed by erts_set_this_node(),
+ * so we *need* to use the new one after erts_set_this_node()
+ * is called.
+ */
+ erts_ref_dist_entry(erts_this_dist_entry);
+ ERTS_PROC_SET_DIST_ENTRY(net_kernel, erts_this_dist_entry);
BIF_RET(am_true);
@@ -2668,74 +3132,80 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-/**********************************************************************
- ** Allocate a dist entry, set node name install the connection handler
- ** setnode_3({name@host, Creation}, Cid, {Type, Version, Initial, IC, OC})
- ** Type = flag field, where the flags are specified in dist.h
- ** Version = distribution version, >= 1
- ** IC = in_cookie (ignored)
- ** OC = out_cookie (ignored)
- **
- ** Note that in distribution protocols above 1, the Initial parameter
- ** is always NIL and the cookies are always the atom '', cookies are not
- ** sent in the distribution messages but are only used in
- ** the handshake.
- **
- ***********************************************************************/
+/*
+ * erts_internal:create_dist_channel/4 is used by
+ * erlang:setnode/3.
+ */
-BIF_RETTYPE setnode_3(BIF_ALIST_3)
+typedef struct {
+ DistEntry *dep;
+ Uint flags;
+ Uint version;
+} ErtsSetupConnDistCtrl;
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version);
+
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg,
+ int *redsp, ErlHeapFragment **bpp);
+
+BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
{
BIF_RETTYPE ret;
Uint flags;
- unsigned long version;
- Eterm ic, oc;
- Eterm *tp;
+ Uint version;
+ Eterm *hp, res_tag = THE_NON_VALUE, res = THE_NON_VALUE;
DistEntry *dep = NULL;
+ int de_locked = 0;
Port *pp = NULL;
- /* Prepare for success */
- ERTS_BIF_PREP_RET(ret, am_true);
-
/*
* Check and pick out arguments
*/
- if (!is_node_name_atom(BIF_ARG_1) ||
- is_not_internal_port(BIF_ARG_2) ||
- (erts_this_node->sysname == am_Noname)) {
- goto badarg;
- }
+ /* Node name... */
+ if (!is_node_name_atom(BIF_ARG_1))
+ goto badarg;
- if (!is_tuple(BIF_ARG_3))
- goto badarg;
- tp = tuple_val(BIF_ARG_3);
- if (*tp++ != make_arityval(4))
- goto badarg;
- if (!is_small(*tp))
- goto badarg;
- flags = unsigned_val(*tp++);
- if (!is_small(*tp) || (version = unsigned_val(*tp)) == 0)
- goto badarg;
- ic = *(++tp);
- oc = *(++tp);
- if (!is_atom(ic) || !is_atom(oc))
- goto badarg;
+ /* Distribution controller... */
+ if (!is_internal_port(BIF_ARG_2) && !is_internal_pid(BIF_ARG_2))
+ goto badarg;
+
+ /* Dist flags... */
+ if (!is_small(BIF_ARG_3))
+ goto badarg;
+ flags = unsigned_val(BIF_ARG_3);
+
+ /* Version... */
+ if (!is_small(BIF_ARG_4))
+ goto badarg;
+ version = unsigned_val(BIF_ARG_4);
+
+ if (version == 0)
+ goto badarg;
- /* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */
- if (!(DFLAG_EXTENDED_REFERENCES & flags)) {
+ if (~flags & DFLAG_DIST_MANDATORY) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "%T", BIF_P->common.id);
if (BIF_P->common.u.alive.reg)
erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name);
erts_dsprintf(dsbufp,
" attempted to enable connection to node %T "
- "which is not able to handle extended references.\n",
+ "which does not support all mandatory capabilities.\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
goto badarg;
}
/*
+ * ToDo: Should we not pass connection_id as well
+     * to make sure it's the right connection we commit to?
+ */
+
+ /*
* Arguments seem to be in order.
*/
@@ -2746,90 +3216,120 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
- pp = erts_id2port_sflgs(BIF_ARG_2,
- BIF_P,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_LOOKUP);
- erts_smp_de_rwlock(dep);
+ if (is_internal_pid(BIF_ARG_2)) {
+ if (BIF_P->common.id == BIF_ARG_2) {
+ ErtsSetupConnDistCtrl scdc;
- if (!pp || (erts_atomic32_read_nob(&pp->state)
- & ERTS_PORT_SFLG_EXITING))
- goto badarg;
+ scdc.dep = dep;
+ scdc.flags = flags;
+ scdc.version = version;
- if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
- goto badarg;
+ res = setup_connection_distctrl(BIF_P, &scdc, NULL, NULL);
+ BUMP_REDS(BIF_P, 5);
+ dep = NULL;
- if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
- goto done; /* Already set */
+ if (res == am_badarg)
+ goto badarg;
- if (dep->status & ERTS_DE_SFLG_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_smp_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_smp_mtx_unlock(&dep->qlock);
- goto yield;
- }
+ ASSERT(is_internal_magic_ref(res));
+ res_tag = am_ok; /* Connection up */
+ }
+ else {
+ ErtsSetupConnDistCtrl *scdcp;
- ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING));
+ scdcp = erts_alloc(ERTS_ALC_T_SETUP_CONN_ARG,
+ sizeof(ErtsSetupConnDistCtrl));
- if (pp->dist_entry || is_not_nil(dep->cid))
- goto badarg;
+ scdcp->dep = dep;
+ scdcp->flags = flags;
+ scdcp->version = version;
- erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+ res = erts_proc_sig_send_rpc_request(BIF_P,
+ BIF_ARG_2,
+ !0,
+ setup_connection_distctrl,
+ (void *) scdcp);
+ if (is_non_value(res))
+ goto badarg;
- /*
- * Dist-ports do not use the "busy port message queue" functionality, but
- * instead use "busy dist entry" functionality.
- */
- {
- ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
- erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ dep = NULL;
+
+ ASSERT(is_internal_ordinary_ref(res));
+
+            res_tag = am_message; /* Caller needs to wait for dhandle in a message */
+ }
+ hp = HAlloc(BIF_P, 3);
}
+ else {
+ Uint32 conn_id;
- pp->dist_entry = dep;
+ pp = erts_id2port_sflgs(BIF_ARG_2,
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ erts_de_rwlock(dep);
+ de_locked = 1;
- dep->version = version;
- dep->creation = 0;
+ if (dep->state != ERTS_DE_STATE_PENDING)
+ goto badarg;
- ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+ if (!pp || (erts_atomic32_read_nob(&pp->state)
+ & ERTS_PORT_SFLG_EXITING))
+ goto badarg;
-#if 1
- dep->send = (pp->drv_ptr->outputv
- ? dist_port_commandv
- : dist_port_command);
-#else
- dep->send = dist_port_command;
-#endif
- ASSERT(dep->send);
+ if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
+ goto badarg;
-#ifdef DEBUG
- erts_smp_mtx_lock(&dep->qlock);
- ASSERT(dep->qsize == 0);
- erts_smp_mtx_unlock(&dep->qlock);
-#endif
+ if (erts_prtsd_get(pp, ERTS_PRTSD_DIST_ENTRY) != NULL
+ || is_not_nil(dep->cid))
+ goto badarg;
- erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
- if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
- create_cache(dep);
+ erts_prtsd_set(pp, ERTS_PRTSD_DIST_ENTRY, dep);
+ erts_prtsd_set(pp, ERTS_PRTSD_CONN_ID, (void*)(UWord)dep->connection_id);
- erts_smp_de_rwunlock(dep);
- dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
- inc_no_nodes();
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+ ASSERT(dep->send);
+
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ }
+
+ conn_id = dep->connection_id;
+ setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
+ de_locked = 0;
+
+ hp = HAlloc(BIF_P, 3 + ERTS_DHANDLE_SIZE);
+ res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep, conn_id);
+ res_tag = am_ok; /* Connection up */
+ dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ }
+
+ ASSERT(is_value(res) && is_value(res_tag));
+
+ res = TUPLE2(hp, res_tag, res);
+
+ ERTS_BIF_PREP_RET(ret, res);
- send_nodes_mon_msgs(BIF_P,
- am_nodeup,
- BIF_ARG_1,
- flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
- NIL);
done:
if (dep && dep != erts_this_dist_entry) {
- erts_smp_de_rwunlock(dep);
+ if (de_locked) {
+ if (de_locked > 0)
+ erts_de_rwunlock(dep);
+ else
+ erts_de_runlock(dep);
+ }
erts_deref_dist_entry(dep);
}
@@ -2838,97 +3338,290 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
return ret;
- yield:
- ERTS_BIF_PREP_YIELD3(ret, bif_export[BIF_setnode_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- goto done;
-
badarg:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ ERTS_BIF_PREP_RET(ret, am_badarg);
goto done;
system_limit:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_RET(ret, am_system_limit);
goto done;
}
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version)
+{
+ Eterm notify_proc = NIL;
+ erts_aint32_t qflgs;
-/**********************************************************************/
-/* dist_exit(Local, Term, Remote) -> Bool */
+ dep->version = version;
+ dep->creation = 0;
+
+ ASSERT(is_internal_port(ctrlr) || is_internal_pid(ctrlr));
+ ASSERT(dep->state == ERTS_DE_STATE_PENDING);
+
+ if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ create_cache(dep);
+
+ erts_set_dist_entry_connected(dep, ctrlr, flags);
+
+ notify_proc = NIL;
+ if (erts_atomic_read_nob(&dep->qsize)) {
+ if (is_internal_port(dep->cid)) {
+ erts_schedule_dist_command(NULL, dep);
+ }
+ else {
+ qflgs = erts_atomic32_read_nob(&dep->qflgs);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO) {
+ qflgs = erts_atomic32_read_band_mb(&dep->qflgs,
+ ~ERTS_DE_QFLG_REQ_INFO);
+ if (qflgs & ERTS_DE_QFLG_REQ_INFO) {
+ notify_proc = dep->cid;
+ ASSERT(is_internal_pid(notify_proc));
+ }
+ }
+ }
+ }
+
+ erts_de_rwunlock(dep);
+
+ if (is_internal_pid(notify_proc))
+ notify_dist_data(c_p, notify_proc);
+
+ inc_no_nodes();
+
+ send_nodes_mon_msgs(c_p,
+ am_nodeup,
+ dep->sysname,
+ flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
+ NIL);
+}
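
The ERTS_DE_QFLG_REQ_INFO handling above is a lock-free claim-once idiom: a relaxed read filters the common case, and the atomic fetch-and that clears the bit returns the old value, so exactly one clearing thread observes the bit still set and performs the notification. A minimal sketch:

    static int claim_req_info(erts_atomic32_t *qflgs)
    {
        erts_aint32_t old = erts_atomic32_read_nob(qflgs);
        if (!(old & ERTS_DE_QFLG_REQ_INFO))
            return 0;                       /* nothing requested */
        /* Clear the flag; the returned old value tells us whether we
         * were the thread that actually cleared it. */
        old = erts_atomic32_read_band_mb(qflgs, ~ERTS_DE_QFLG_REQ_INFO);
        return (old & ERTS_DE_QFLG_REQ_INFO) != 0;
    }
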
+
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErtsSetupConnDistCtrl *scdcp = (ErtsSetupConnDistCtrl *) arg;
+ DistEntry *dep = scdcp->dep;
+ int dep_locked = 0;
+ Eterm *hp;
+ Uint32 conn_id;
+
+ if (redsp)
+ *redsp = 1;
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+
+ erts_de_rwlock(dep);
+ dep_locked = !0;
+
+ if (dep->state != ERTS_DE_STATE_PENDING)
+ goto badarg;
+
+ conn_id = dep->connection_id;
+
+ if (is_not_nil(dep->cid))
+ goto badarg;
+
+ c_p->flags |= F_DISTRIBUTION;
+ ERTS_PROC_SET_DIST_ENTRY(c_p, dep);
+
+ dep->send = NULL; /* Only for distr ports... */
+
+ if (redsp)
+ *redsp = 5;
+
+ setup_connection_epiloge_rwunlock(c_p, dep, c_p->common.id,
+ scdcp->flags, scdcp->version);
+
+    /* we take over the previous increment of dep's refc */
+
+ if (!bpp) /* called directly... */
+ return erts_make_dhandle(c_p, dep, conn_id);
+
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
-BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
+ *bpp = new_message_buffer(ERTS_DHANDLE_SIZE);
+ hp = (*bpp)->mem;
+ return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep, conn_id);
+
+badarg:
+
+ if (bpp) /* not called directly */
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+ if (dep_locked)
+ erts_de_rwunlock(dep);
+
+ erts_deref_dist_entry(dep);
+
+ return am_badarg;
+}
+
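
setup_connection_distctrl doubles as an erts_proc_sig_send_rpc_request callback. The convention visible above: *redsp reports the reductions to charge the target process, and bpp distinguishes a direct call (bpp == NULL, terms may be built on the caller) from a scheduled one (non-immediate results must go into a heap fragment stored through *bpp). A hedged sketch of that callback shape (my_rpc is hypothetical):

    static Eterm my_rpc(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
    {
        (void) c_p;
        (void) arg;
        if (redsp)
            *redsp = 1;   /* cost charged to the process we run in */
        /* An immediate result needs no heap fragment, so *bpp can be
         * left untouched; compound results must be built into *bpp
         * when bpp != NULL (i.e. when the request was scheduled). */
        return am_ok;
    }
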
+
+BIF_RETTYPE erts_internal_get_dflags_0(BIF_ALIST_0)
{
- Eterm local;
- Eterm remote;
- DistEntry *rdep;
+ return erts_dflags_record;
+}
- local = BIF_ARG_1;
- remote = BIF_ARG_3;
+BIF_RETTYPE erts_internal_new_connection_1(BIF_ALIST_1)
+{
+ DistEntry* dep;
+ Uint32 conn_id;
+ Eterm* hp;
+ Eterm dhandle;
- /* Check that remote is a remote process */
- if (is_not_external_pid(remote))
- goto error;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ dep = erts_find_or_insert_dist_entry(BIF_ARG_1);
- rdep = external_dist_entry(remote);
-
- if(rdep == erts_this_dist_entry)
- goto error;
+ if (dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ BIF_ERROR(BIF_P, BADARG);
+ }
- /* Check that local is local */
- if (is_internal_pid(local)) {
- Process *lp;
- ErtsProcLocks lp_locks;
- if (BIF_P->common.id == local) {
- lp_locks = ERTS_PROC_LOCKS_ALL;
- lp = BIF_P;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- else {
- lp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- lp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- local, lp_locks);
- if (!lp) {
- BIF_RET(am_true); /* ignore */
- }
- }
-
- (void) erts_send_exit_signal(BIF_P,
- remote,
- lp,
- &lp_locks,
- BIF_ARG_2,
- NIL,
- NULL,
- 0);
-#ifdef ERTS_SMP
- if (lp == BIF_P)
- lp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
- erts_smp_proc_unlock(lp, lp_locks);
- if (lp == BIF_P) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state);
- /*
- * We may have exited current process and may have to take action.
- */
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
- erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
- ERTS_BIF_EXITED(BIF_P);
- }
- }
+ erts_de_rwlock(dep);
+
+ switch (dep->state) {
+ case ERTS_DE_STATE_CONNECTED:
+ case ERTS_DE_STATE_EXITING:
+ case ERTS_DE_STATE_PENDING:
+ conn_id = dep->connection_id;
+ break;
+ case ERTS_DE_STATE_IDLE:
+ erts_set_dist_entry_pending(dep);
+ conn_id = dep->connection_id;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "Invalid dep->state (%d)\n", dep->state);
+ }
+ erts_de_rwunlock(dep);
+ hp = HAlloc(BIF_P, ERTS_DHANDLE_SIZE);
+ dhandle = erts_build_dhandle(&hp, &BIF_P->off_heap, dep, conn_id);
+ erts_deref_dist_entry(dep);
+ BIF_RET(dhandle);
+}
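
The switch above encodes the DistEntry connection state machine. As a reading aid, the transitions inferred from this file (a sketch, not necessarily exhaustive):

    /*
     *  ERTS_DE_STATE_IDLE
     *    --erts_set_dist_entry_pending()-->       ERTS_DE_STATE_PENDING
     *  ERTS_DE_STATE_PENDING
     *    --setup_connection_epiloge_rwunlock()--> ERTS_DE_STATE_CONNECTED
     *    --erts_abort_connection_rwunlock()-->    ERTS_DE_STATE_IDLE
     *  ERTS_DE_STATE_CONNECTED
     *    --kill_connection()-->                   ERTS_DE_STATE_EXITING
     *  ERTS_DE_STATE_EXITING
     *    --(connection cleanup)-->                ERTS_DE_STATE_IDLE
     */
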
+
+Sint erts_abort_connection_rwunlock(DistEntry* dep)
+{
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+
+ if (dep->state == ERTS_DE_STATE_CONNECTED) {
+ kill_connection(dep);
}
- else if (is_external_pid(local)
- && external_dist_entry(local) == erts_this_dist_entry) {
- BIF_RET(am_true); /* ignore */
+ else if (dep->state == ERTS_DE_STATE_PENDING) {
+ ErtsAtomCache *cache;
+ ErtsDistOutputBuf *obuf;
+ ErtsProcList *resume_procs;
+ Sint reds = 0;
+ ErtsMonLnkDist *mld;
+
+ ASSERT(is_nil(dep->cid));
+
+ mld = dep->mld;
+ dep->mld = NULL;
+
+ cache = dep->cache;
+ dep->cache = NULL;
+ erts_mtx_lock(&dep->qlock);
+ obuf = dep->out_queue.first;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ ASSERT(!dep->tmp_out_queue.first);
+ ASSERT(!dep->finalized_out_queue.first);
+ resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
+ erts_mtx_unlock(&dep->qlock);
+ erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
+ dep->send = NULL;
+
+ erts_set_dist_entry_not_connected(dep);
+ erts_de_rwunlock(dep);
+
+ schedule_con_monitor_link_cleanup(mld, THE_NON_VALUE,
+ THE_NON_VALUE, THE_NON_VALUE);
+
+ if (resume_procs) {
+ int resumed = erts_resume_processes(resume_procs);
+ reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
+ }
+
+ delete_cache(cache);
+ free_de_out_queues(dep, obuf);
+ return reds;
}
- else
- goto error;
+ erts_de_rwunlock(dep);
+ return 0;
+}
+
+static Sint abort_connection(DistEntry *dep, Uint32 conn_id)
+{
+ erts_de_rwlock(dep);
+ if (dep->connection_id == conn_id)
+ return erts_abort_connection_rwunlock(dep);
+ erts_de_rwunlock(dep);
+ return 0;
+}
+
+BIF_RETTYPE erts_internal_abort_connection_2(BIF_ALIST_2)
+{
+ DistEntry* dep;
+ Uint32 conn_id;
+ Sint reds;
+
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ dep = erts_dhandle_to_dist_entry(BIF_ARG_2, &conn_id);
+ if (!dep || dep != erts_find_dist_entry(BIF_ARG_1)
+ || dep == erts_this_dist_entry) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ reds = abort_connection(dep, conn_id);
+ BUMP_REDS(BIF_P, reds);
BIF_RET(am_true);
+}
- error:
- BIF_ERROR(BIF_P, BADARG);
+int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks)
+{
+ erts_de_rwlock(dep);
+ if (dep->state != ERTS_DE_STATE_IDLE) {
+ erts_de_rwunlock(dep);
+ }
+ else {
+ Process* net_kernel;
+ ErtsProcLocks nk_locks = ERTS_PROC_LOCK_MSGQ;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ Eterm msg, dhandle;
+ Uint32 conn_id;
+
+ erts_set_dist_entry_pending(dep);
+ conn_id = dep->connection_id;
+ erts_de_rwunlock(dep);
+
+ net_kernel = erts_whereis_process(proc, proc_locks,
+ am_net_kernel, nk_locks, 0);
+ if (!net_kernel) {
+ abort_connection(dep, conn_id);
+ return 0;
+ }
+
+ /*
+ * Send {auto_connect, Node, DHandle} to net_kernel
+ */
+ mp = erts_alloc_message_heap(net_kernel, &nk_locks,
+ 4 + ERTS_DHANDLE_SIZE,
+ &hp, &ohp);
+ dhandle = erts_build_dhandle(&hp, ohp, dep, conn_id);
+ msg = TUPLE3(hp, am_auto_connect, dep->sysname, dhandle);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(proc, net_kernel, nk_locks, mp, msg);
+ erts_proc_unlock(net_kernel, nk_locks);
+ }
+
+ return 1;
}
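
erts_auto_connect shows the standard in-emulator message send: allocate message heap space for the receiver, build the term into it, stamp the token, and queue the message while holding the message-queue lock. A reduced sketch, assuming both tuple elements are immediates so no copying is needed (send_pair is hypothetical; a 2-tuple costs 3 heap words):

    static void send_pair(Process *from, Process *to, Eterm a, Eterm b)
    {
        ErtsProcLocks to_locks = ERTS_PROC_LOCK_MSGQ;
        ErtsMessage *mp;
        ErlOffHeap *ohp;
        Eterm *hp, msg;

        mp = erts_alloc_message_heap(to, &to_locks, 3, &hp, &ohp);
        msg = TUPLE2(hp, a, b);
        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        erts_queue_proc_message(from, to, to_locks, mp, msg);
        erts_proc_unlock(to, to_locks);
    }
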
/**********************************************************************/
@@ -3000,13 +3693,15 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
length = 0;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
ASSERT(erts_no_of_not_connected_dist_entries > 0);
ASSERT(erts_no_of_hidden_dist_entries >= 0);
+ ASSERT(erts_no_of_pending_dist_entries >= 0);
ASSERT(erts_no_of_visible_dist_entries >= 0);
if(not_connected)
- length += (erts_no_of_not_connected_dist_entries - 1);
+ length += ((erts_no_of_not_connected_dist_entries - 1)
+ + erts_no_of_pending_dist_entries);
if(hidden)
length += erts_no_of_hidden_dist_entries;
if(visible)
@@ -3017,7 +3712,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
result = NIL;
if (length == 0) {
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
goto done;
}
@@ -3026,13 +3721,18 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
#ifdef DEBUG
endp = hp + length*2;
#endif
- if(not_connected)
+ if(not_connected) {
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
if (dep != erts_this_dist_entry) {
result = CONS(hp, dep->sysname, result);
hp += 2;
}
+ }
+ for(dep = erts_pending_dist_entries; dep; dep = dep->next) {
+ result = CONS(hp, dep->sysname, result);
+ hp += 2;
}
+ }
if(hidden)
for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
result = CONS(hp, dep->sysname, result);
@@ -3048,7 +3748,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1)
hp += 2;
}
ASSERT(endp == hp);
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
done:
UnUseTmpHeap(2,BIF_P);
@@ -3074,76 +3774,179 @@ BIF_RETTYPE is_alive_0(BIF_ALIST_0)
static BIF_RETTYPE
monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
{
- DistEntry *dep;
- ErtsLink *lnk;
+ BIF_RETTYPE ret;
+ DistEntry *dep = NULL;
Eterm l;
+ int async_connect = 1;
for (l = Options; l != NIL && is_list(l); l = CDR(list_val(l))) {
Eterm t = CAR(list_val(l));
- /* allow_passive_connect the only available option right now */
- if (t != am_allow_passive_connect) {
+ if (t == am_allow_passive_connect) {
+ /*
+ * Handle this horrible feature by falling back on old synchronous
+ * auto-connect (if needed)
+ */
+ async_connect = 0;
+ } else {
BIF_ERROR(p, BADARG);
}
}
if (l != NIL) {
BIF_ERROR(p, BADARG);
}
- if (is_not_atom(Node) ||
- ((Bool != am_true) && (Bool != am_false)) ||
- ((erts_this_node->sysname == am_Noname)
- && (Node != erts_this_node->sysname))) {
- BIF_ERROR(p, BADARG);
- }
- dep = erts_sysname_to_connected_dist_entry(Node);
- if (!dep) {
- do_trap:
- BIF_TRAP3(dmonitor_node_trap, p, Node, Bool, Options);
+ if (is_not_atom(Node))
+ goto badarg;
+
+ if (erts_this_node->sysname == am_Noname && Node != am_Noname)
+ goto badarg;
+
+ switch (Bool) {
+
+ case am_false: {
+ ErtsMonitor *mon;
+ /*
+ * Before OTP-21, monitor_node(Node, false) triggered
+ * auto-connect and a 'nodedown' message if that failed.
+ * Now it's a simple no-op which feels more reasonable.
+ */
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(p), Node);
+ if (mon) {
+ ErtsMonitorDataExtended *mdep;
+ ASSERT(erts_monitor_is_origin(mon));
+
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+
+ ASSERT((mdep->u.refc > 0));
+ if (--mdep->u.refc == 0) {
+ if (!mdep->uptr.node_monitors)
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(p), mon);
+ else {
+ ErtsMonitor *sub_mon;
+ ErtsMonitorDataExtended *sub_mdep;
+ sub_mon = erts_monitor_list_last(mdep->uptr.node_monitors);
+ erts_monitor_list_delete(&mdep->uptr.node_monitors, sub_mon);
+ sub_mon->flags &= ~ERTS_ML_FLG_IN_SUBTABLE;
+ sub_mdep = ((ErtsMonitorDataExtended *)
+ erts_monitor_to_data(sub_mon));
+ sub_mdep->uptr.node_monitors = mdep->uptr.node_monitors;
+ mdep->uptr.node_monitors = NULL;
+ erts_monitor_tree_replace(&ERTS_P_MONITORS(p), mon, sub_mon);
+ }
+ if (erts_monitor_dist_delete(&mdep->md.target))
+ erts_monitor_release_both((ErtsMonitorData *) mdep);
+ else
+ erts_monitor_release(mon);
+ }
+ }
+ break;
}
- if (dep == erts_this_dist_entry)
- goto done;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_de_rlock(dep);
- if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- erts_smp_de_runlock(dep);
- goto do_trap;
- }
- erts_smp_de_links_lock(dep);
- erts_smp_de_runlock(dep);
-
- if (Bool == am_true) {
- ASSERT(dep->cid != NIL);
- lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
- p->common.id);
- ++ERTS_LINK_REFC(lnk);
- lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node);
- ++ERTS_LINK_REFC(lnk);
- }
- else {
- lnk = erts_lookup_link(dep->node_links, p->common.id);
- if (lnk != NULL) {
- if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&(dep->node_links),
- p->common.id));
- }
- }
- lnk = erts_lookup_link(ERTS_P_LINKS(p), Node);
- if (lnk != NULL) {
- if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p),
- Node));
+ case am_true: {
+ ErtsDSigData dsd;
+ dsd.node = Node;
+
+ dep = erts_find_or_insert_dist_entry(Node);
+ if (dep == erts_this_dist_entry)
+ break;
+
+ switch (erts_dsig_prepare(&dsd, dep, p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_RLOCK, 0, async_connect)) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+            /* Trap to either send 'nodedown' or do a passive connection attempt */
+ goto do_trap;
+ case ERTS_DSIG_PREP_PENDING:
+ if (!async_connect) {
+ /*
+ * Pending connection may fail, so we must trap
+ * to ensure passive connection attempt
+ */
+ erts_de_runlock(dep);
+ goto do_trap;
}
- }
+ /*fall through*/
+ case ERTS_DSIG_PREP_CONNECTED: {
+ ErtsMonitor *mon;
+ ErtsMonitorDataExtended *mdep;
+ int created;
+
+ mon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(p),
+ &created,
+ ERTS_MON_TYPE_NODE,
+ p->common.id,
+ Node);
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ if (created) {
+#ifdef DEBUG
+ int inserted =
+#endif
+ erts_monitor_dist_insert(&mdep->md.target, dep->mld);
+ ASSERT(inserted);
+ ASSERT(mdep->dist->connection_id == dep->connection_id);
+ }
+ else if (mdep->dist->connection_id != dep->connection_id) {
+ ErtsMonitorDataExtended *mdep2;
+ ErtsMonitor *mon2;
+#ifdef DEBUG
+ int inserted;
+#endif
+ mdep2 = ((ErtsMonitorDataExtended *)
+ erts_monitor_create(ERTS_MON_TYPE_NODE, NIL,
+ p->common.id, Node, NIL));
+ mon2 = &mdep2->md.origin;
+#ifdef DEBUG
+ inserted =
+#endif
+ erts_monitor_dist_insert(&mdep->md.target, dep->mld);
+ ASSERT(inserted);
+ ASSERT(mdep2->dist->connection_id == dep->connection_id);
+
+ mdep2->uptr.node_monitors = mdep->uptr.node_monitors;
+ mdep->uptr.node_monitors = NULL;
+ erts_monitor_tree_replace(&ERTS_P_MONITORS(p), mon, mon2);
+ erts_monitor_list_insert(&mdep2->uptr.node_monitors, mon);
+ mon->flags |= ERTS_ML_FLG_IN_SUBTABLE;
+ mdep = mdep2;
+ }
+
+ mdep->u.refc++;
+
+ break;
+ }
+
+ default:
+ ERTS_ASSERT(! "Invalid dsig prepare result");
+ }
+
+ erts_de_runlock(dep);
+
+ break;
}
- erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ default:
+ goto badarg;
+ }
- done:
- erts_deref_dist_entry(dep);
- BIF_RET(am_true);
+ ERTS_BIF_PREP_RET(ret, am_true);
+
+do_return:
+
+ if (dep)
+ erts_deref_dist_entry(dep);
+
+ return ret;
+
+do_trap:
+ ERTS_BIF_PREP_TRAP3(ret, dmonitor_node_trap, p, Node, Bool, Options);
+ goto do_return;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, p, BADARG);
+ goto do_return;
}
BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
@@ -3171,9 +3974,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
if (de == erts_this_dist_entry) {
BIF_RET(am_true);
}
- erts_smp_de_rlock(de);
+ erts_de_rlock(de);
f = de->flags;
- erts_smp_de_runlock(de);
+ erts_de_runlock(de);
BIF_RET(((f & DFLAG_UNICODE_IO) ? am_true : am_false));
}
@@ -3194,18 +3997,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
#define ERTS_NODES_MON_OPT_TYPES \
(ERTS_NODES_MON_OPT_TYPE_VISIBLE|ERTS_NODES_MON_OPT_TYPE_HIDDEN)
-typedef struct ErtsNodesMonitor_ ErtsNodesMonitor;
-struct ErtsNodesMonitor_ {
- ErtsNodesMonitor *prev;
- ErtsNodesMonitor *next;
- Process *proc;
- Uint16 opts;
- Uint16 no;
-};
-
-static erts_smp_mtx_t nodes_monitors_mtx;
-static ErtsNodesMonitor *nodes_monitors;
-static ErtsNodesMonitor *nodes_monitors_end;
+static erts_mtx_t nodes_monitors_mtx;
+static ErtsMonitor *nodes_monitors;
+static Uint no_nodes_monitors;
/*
 * Nodes monitors are stored in a doubly linked list. 'nodes_monitors'
@@ -3221,111 +4015,172 @@ static ErtsNodesMonitor *nodes_monitors_end;
static void
init_nodes_monitors(void)
{
- erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ erts_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
nodes_monitors = NULL;
- nodes_monitors_end = NULL;
+ no_nodes_monitors = 0;
}
-static ERTS_INLINE Uint
-nodes_mon_msg_sz(ErtsNodesMonitor *nmp, Eterm what, Eterm reason)
+Eterm
+erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist)
{
- Uint sz;
- if (!nmp->opts) {
- sz = 3;
- }
- else {
- sz = 0;
+ Eterm key, old_value, opts_list = olist;
+ Uint opts = (Uint) 0;
- if (nmp->opts & ERTS_NODES_MON_OPT_TYPES)
- sz += 2 + 3;
+ ASSERT(c_p);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ if (on != am_true && on != am_false)
+ return THE_NON_VALUE;
+
+ if (is_not_nil(opts_list)) {
+ int all = 0, visible = 0, hidden = 0;
- if (what == am_nodedown
- && (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)) {
- if (is_not_immed(reason))
- sz += size_object(reason);
- sz += 2 + 3;
+ while (is_list(opts_list)) {
+ Eterm *cp = list_val(opts_list);
+ Eterm opt = CAR(cp);
+ opts_list = CDR(cp);
+ if (opt == am_nodedown_reason)
+ opts |= ERTS_NODES_MON_OPT_DOWN_REASON;
+ else if (is_tuple(opt)) {
+ Eterm* tp = tuple_val(opt);
+ if (arityval(tp[0]) != 2)
+ return THE_NON_VALUE;
+ switch (tp[1]) {
+ case am_node_type:
+ switch (tp[2]) {
+ case am_visible:
+ if (hidden || all)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPE_VISIBLE;
+ visible = 1;
+ break;
+ case am_hidden:
+ if (visible || all)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPE_HIDDEN;
+ hidden = 1;
+ break;
+ case am_all:
+ if (visible || hidden)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPES;
+ all = 1;
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ }
+ else {
+ return THE_NON_VALUE;
+ }
}
- sz += 4;
+ if (is_not_nil(opts_list))
+ return THE_NON_VALUE;
+ }
+
+ key = make_small(opts);
+
+ if (on == am_true) {
+ ErtsMonitorDataExtended *mdep;
+ ErtsMonitor *omon;
+ int created;
+ omon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(c_p),
+ &created,
+ ERTS_MON_TYPE_NODES,
+ c_p->common.id,
+ key);
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(omon);
+ if (created) {
+ erts_mtx_lock(&nodes_monitors_mtx);
+ no_nodes_monitors++;
+ erts_monitor_list_insert(&nodes_monitors, &mdep->md.target);
+ erts_mtx_unlock(&nodes_monitors_mtx);
+ }
+ old_value = mdep->u.refc;
+ mdep->u.refc++;
+ }
+ else {
+ ErtsMonitorDataExtended *mdep;
+ ErtsMonitor *omon;
+ omon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), key);
+ if (!omon)
+ old_value = 0;
+ else {
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(omon);
+ old_value = mdep->u.refc;
+ ASSERT(mdep->u.refc > 0);
+ erts_mtx_lock(&nodes_monitors_mtx);
+ ASSERT(no_nodes_monitors > 0);
+ no_nodes_monitors--;
+ ASSERT(erts_monitor_is_in_table(&mdep->md.target));
+ erts_monitor_list_delete(&nodes_monitors, &mdep->md.target);
+ erts_mtx_unlock(&nodes_monitors_mtx);
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), omon);
+ erts_monitor_release_both((ErtsMonitorData *) mdep);
+ }
}
- return sz;
+
+ return erts_make_integer(old_value, c_p);
}
-static ERTS_INLINE void
-send_nodes_mon_msg(Process *rp,
- ErtsProcLocks *rp_locksp,
- ErtsNodesMonitor *nmp,
- Eterm node,
- Eterm what,
- Eterm type,
- Eterm reason,
- Uint sz)
+void
+erts_monitor_nodes_delete(ErtsMonitor *omon)
{
- Eterm msg;
- Eterm *hp;
- ErtsMessage *mp;
- ErlOffHeap *ohp;
-#ifdef DEBUG
- Eterm *hend;
-#endif
+ ErtsMonitorData *mdp;
- mp = erts_alloc_message_heap(rp, rp_locksp, sz, &hp, &ohp);
-#ifdef DEBUG
- hend = hp + sz;
-#endif
+ ASSERT(omon->type == ERTS_MON_TYPE_NODES);
+ ASSERT(erts_monitor_is_origin(omon));
- if (!nmp->opts) {
- msg = TUPLE2(hp, what, node);
-#ifdef DEBUG
- hp += 3;
-#endif
- }
- else {
- Eterm tup;
- Eterm info = NIL;
+ mdp = erts_monitor_to_data(omon);
- if (nmp->opts & (ERTS_NODES_MON_OPT_TYPE_VISIBLE
- | ERTS_NODES_MON_OPT_TYPE_HIDDEN)) {
+ erts_mtx_lock(&nodes_monitors_mtx);
+ ASSERT(erts_monitor_is_in_table(&mdp->target));
+ ASSERT(no_nodes_monitors > 0);
+ no_nodes_monitors--;
+ erts_monitor_list_delete(&nodes_monitors, &mdp->target);
+ erts_mtx_unlock(&nodes_monitors_mtx);
+ erts_monitor_release_both(mdp);
+}
- tup = TUPLE2(hp, am_node_type, type);
- hp += 3;
- info = CONS(hp, tup, info);
- hp += 2;
- }
+typedef struct {
+ Eterm pid;
+ Eterm options;
+} ErtsNodesMonitorData;
- if (what == am_nodedown
- && (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)) {
- Eterm rsn_cpy;
-
- if (is_immed(reason))
- rsn_cpy = reason;
- else {
- Eterm rsn_sz = size_object(reason);
- rsn_cpy = copy_struct(reason, rsn_sz, &hp, ohp);
- }
+typedef struct {
+ ErtsNodesMonitorData *nmdp;
+ Uint i;
+} ErtsNodesMonitorContext;
- tup = TUPLE2(hp, am_nodedown_reason, rsn_cpy);
- hp += 3;
- info = CONS(hp, tup, info);
- hp += 2;
- }
+static void
+save_nodes_monitor(ErtsMonitor *mon, void *vctxt)
+{
+ ErtsNodesMonitorContext *ctxt = vctxt;
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
- msg = TUPLE3(hp, what, node, info);
-#ifdef DEBUG
- hp += 4;
-#endif
- }
+ ASSERT(erts_monitor_is_target(mon));
+ ASSERT(mon->type == ERTS_MON_TYPE_NODES);
- ASSERT(hend == hp);
- erts_queue_message(rp, *rp_locksp, mp, msg, am_system);
+ ctxt->nmdp[ctxt->i].pid = mon->other.item;
+ ctxt->nmdp[ctxt->i].options = mdp->origin.other.item;
+
+ ctxt->i++;
}
static void
send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reason)
{
- ErtsNodesMonitor *nmp;
- ErtsProcLocks rp_locks = 0; /* Init to shut up false warning */
- Process *rp = NULL;
+ Uint opts;
+ Uint i, no, reason_size;
+ ErtsNodesMonitorData def_buf[100];
+ ErtsNodesMonitorData *nmdp = &def_buf[0];
+ ErtsNodesMonitorContext ctxt;
ASSERT(is_immed(what));
ASSERT(is_immed(node));
@@ -3346,31 +4201,44 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
}
#endif
- ERTS_SMP_LC_ASSERT(!c_p
- || (erts_proc_lc_my_proc_locks(c_p)
- == ERTS_PROC_LOCK_MAIN));
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ ctxt.i = 0;
+
+ reason_size = is_immed(reason) ? 0 : size_object(reason);
- for (nmp = nodes_monitors; nmp; nmp = nmp->next) {
- int i;
- Uint16 no;
- Uint sz;
+ erts_mtx_lock(&nodes_monitors_mtx);
+ if (no_nodes_monitors > sizeof(def_buf)/sizeof(def_buf[0]))
+ nmdp = erts_alloc(ERTS_ALC_T_TMP,
+ no_nodes_monitors*sizeof(ErtsNodesMonitorData));
+ ctxt.nmdp = nmdp;
+ erts_monitor_list_foreach(nodes_monitors,
+ save_nodes_monitor,
+ (void *) &ctxt);
+
+ ASSERT(ctxt.i == no_nodes_monitors);
+ no = no_nodes_monitors;
+
+ erts_mtx_unlock(&nodes_monitors_mtx);
- ASSERT(nmp->proc != NULL);
+ for (i = 0; i < no; i++) {
+ Eterm tmp_heap[3+2+3+2+4 /* max need */];
+ Eterm *hp, msg;
+ Uint hsz;
- if (!nmp->opts) {
+ ASSERT(is_small(nmdp[i].options));
+ opts = (Uint) signed_val(nmdp[i].options);
+ if (!opts) {
if (type != am_visible)
continue;
}
else {
switch (type) {
case am_hidden:
- if (!(nmp->opts & ERTS_NODES_MON_OPT_TYPE_HIDDEN))
+ if (!(opts & ERTS_NODES_MON_OPT_TYPE_HIDDEN))
continue;
break;
case am_visible:
- if ((nmp->opts & ERTS_NODES_MON_OPT_TYPES)
- && !(nmp->opts & ERTS_NODES_MON_OPT_TYPE_VISIBLE))
+ if ((opts & ERTS_NODES_MON_OPT_TYPES)
+ && !(opts & ERTS_NODES_MON_OPT_TYPE_VISIBLE))
continue;
break;
default:
@@ -3378,342 +4246,162 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
}
}
- if (rp != nmp->proc) {
- if (rp) {
- if (rp == c_p)
- rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
- }
+ hsz = 0;
+ hp = &tmp_heap[0];
- rp = nmp->proc;
- rp_locks = 0;
- if (rp == c_p)
- rp_locks |= ERTS_PROC_LOCK_MAIN;
- }
+ if (!opts) {
+ msg = TUPLE2(hp, what, node);
+ hp += 3;
+ }
+ else {
+ Eterm tup;
+ Eterm info = NIL;
+
+ if (opts & (ERTS_NODES_MON_OPT_TYPE_VISIBLE
+ | ERTS_NODES_MON_OPT_TYPE_HIDDEN)) {
+
+ tup = TUPLE2(hp, am_node_type, type);
+ hp += 3;
+ info = CONS(hp, tup, info);
+ hp += 2;
+ }
+
+ if (what == am_nodedown
+ && (opts & ERTS_NODES_MON_OPT_DOWN_REASON)) {
+ hsz += reason_size;
+ tup = TUPLE2(hp, am_nodedown_reason, reason);
+ hp += 3;
+ info = CONS(hp, tup, info);
+ hp += 2;
+ }
+
+ msg = TUPLE3(hp, what, node, info);
+ hp += 4;
+ }
- ASSERT(rp);
+ ASSERT(hp - &tmp_heap[0] <= sizeof(tmp_heap)/sizeof(tmp_heap[0]));
- sz = nodes_mon_msg_sz(nmp, what, reason);
+ hsz += hp - &tmp_heap[0];
- for (i = 0, no = nmp->no; i < no; i++)
- send_nodes_mon_msg(rp,
- &rp_locks,
- nmp,
- node,
- what,
- type,
- reason,
- sz);
+ erts_proc_sig_send_persistent_monitor_msg(ERTS_MON_TYPE_NODES,
+ nmdp[i].options,
+ am_system,
+ nmdp[i].pid,
+ msg,
+ hsz);
}
- if (rp) {
- if (rp == c_p)
- rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
- }
-
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ if (nmdp != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, nmdp);
}
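
send_nodes_mon_msgs snapshots the monitor list into def_buf so the global mutex can be dropped before any message is sent; the fixed on-stack array covers the common case and a temporary allocation handles larger snapshots. The shape of that idiom, reduced (Item and the fill/process steps are hypothetical):

    typedef struct { Eterm pid; Eterm options; } Item;

    static void snapshot_and_process(Uint n)
    {
        Item def_buf[100];              /* common case: no allocation */
        Item *buf = &def_buf[0];

        if (n > sizeof(def_buf)/sizeof(def_buf[0]))
            buf = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(Item));

        /* ... fill buf[0..n-1] while holding the lock, then unlock,
         * then process buf without the lock held ... */

        if (buf != &def_buf[0])
            erts_free(ERTS_ALC_T_TMP, buf);
    }
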
+
-static Eterm
-insert_nodes_monitor(Process *c_p, Uint32 opts)
-{
- Uint16 no = 1;
- Eterm res = am_false;
- ErtsNodesMonitor *xnmp, *nmp;
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
-
- xnmp = c_p->nodes_monitors;
- if (xnmp) {
- ASSERT(!xnmp->prev || xnmp->prev->proc != c_p);
-
- while (1) {
- ASSERT(xnmp->proc == c_p);
- if (xnmp->opts == opts)
- break;
- if (!xnmp->next || xnmp->next->proc != c_p)
- break;
- xnmp = xnmp->next;
- }
- ASSERT(xnmp);
- ASSERT(xnmp->proc == c_p);
- ASSERT(xnmp->opts == opts
- || !xnmp->next
- || xnmp->next->proc != c_p);
-
- if (xnmp->opts != opts)
- goto alloc_new;
- else {
- res = am_true;
- no = xnmp->no++;
- if (!xnmp->no) {
- /*
- * 'no' wrapped; transfer all prevous monitors to new
- * element (which will be the next element in the list)
- * and set this to one...
- */
- xnmp->no = 1;
- goto alloc_new;
- }
- }
- }
- else {
- alloc_new:
- nmp = erts_alloc(ERTS_ALC_T_NODES_MON, sizeof(ErtsNodesMonitor));
- nmp->proc = c_p;
- nmp->opts = opts;
- nmp->no = no;
-
- if (xnmp) {
- ASSERT(nodes_monitors);
- ASSERT(c_p->nodes_monitors);
- nmp->next = xnmp->next;
- nmp->prev = xnmp;
- xnmp->next = nmp;
- if (nmp->next) {
- ASSERT(nodes_monitors_end != xnmp);
- ASSERT(nmp->next->prev == xnmp);
- nmp->next->prev = nmp;
- }
- else {
- ASSERT(nodes_monitors_end == xnmp);
- nodes_monitors_end = nmp;
- }
- }
- else {
- ASSERT(!c_p->nodes_monitors);
- c_p->nodes_monitors = nmp;
- nmp->next = NULL;
- nmp->prev = nodes_monitors_end;
- if (nodes_monitors_end) {
- ASSERT(nodes_monitors);
- nodes_monitors_end->next = nmp;
- }
- else {
- ASSERT(!nodes_monitors);
- nodes_monitors = nmp;
- }
- nodes_monitors_end = nmp;
- }
- }
- return res;
-}
-
-static Eterm
-remove_nodes_monitors(Process *c_p, Uint32 opts, int all)
-{
- Eterm res = am_false;
- ErtsNodesMonitor *nmp;
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
-
- nmp = c_p->nodes_monitors;
- ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p);
-
- while (nmp && nmp->proc == c_p) {
- if (!all && nmp->opts != opts)
- nmp = nmp->next;
- else { /* if (all || nmp->opts == opts) */
- ErtsNodesMonitor *free_nmp;
- res = am_true;
- if (nmp->prev) {
- ASSERT(nodes_monitors != nmp);
- nmp->prev->next = nmp->next;
- }
- else {
- ASSERT(nodes_monitors == nmp);
- nodes_monitors = nmp->next;
- }
- if (nmp->next) {
- ASSERT(nodes_monitors_end != nmp);
- nmp->next->prev = nmp->prev;
- }
- else {
- ASSERT(nodes_monitors_end == nmp);
- nodes_monitors_end = nmp->prev;
- }
- free_nmp = nmp;
- nmp = nmp->next;
- if (c_p->nodes_monitors == free_nmp)
- c_p->nodes_monitors = nmp && nmp->proc == c_p ? nmp : NULL;
- erts_free(ERTS_ALC_T_NODES_MON, free_nmp);
- }
- }
-
- ASSERT(!all || !c_p->nodes_monitors);
- return res;
-}
-
-void
-erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks)
-{
-#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
- if (c_p) {
- ErtsProcLocks might_unlock = locks & ~ERTS_PROC_LOCK_MAIN;
- if (might_unlock)
- erts_proc_lc_might_unlock(c_p, might_unlock);
- }
-#endif
- if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) {
- ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN;
- if (c_p && unlock_locks)
- erts_smp_proc_unlock(c_p, unlock_locks);
- erts_smp_mtx_lock(&nodes_monitors_mtx);
- if (c_p && unlock_locks)
- erts_smp_proc_lock(c_p, unlock_locks);
- }
- remove_nodes_monitors(c_p, 0, 1);
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
-}
-
-Eterm
-erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist)
-{
+typedef struct {
+ Eterm **hpp;
+ Uint *szp;
Eterm res;
- Eterm opts_list = olist;
- Uint16 opts = (Uint16) 0;
+} ErtsNodesMonitorInfoContext;
- ASSERT(c_p);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
- if (on != am_true && on != am_false)
- return THE_NON_VALUE;
-
- if (is_not_nil(opts_list)) {
- int all = 0, visible = 0, hidden = 0;
-
- while (is_list(opts_list)) {
- Eterm *cp = list_val(opts_list);
- Eterm opt = CAR(cp);
- opts_list = CDR(cp);
- if (opt == am_nodedown_reason)
- opts |= ERTS_NODES_MON_OPT_DOWN_REASON;
- else if (is_tuple(opt)) {
- Eterm* tp = tuple_val(opt);
- if (arityval(tp[0]) != 2)
- return THE_NON_VALUE;
- switch (tp[1]) {
- case am_node_type:
- switch (tp[2]) {
- case am_visible:
- if (hidden || all)
- return THE_NON_VALUE;
- opts |= ERTS_NODES_MON_OPT_TYPE_VISIBLE;
- visible = 1;
- break;
- case am_hidden:
- if (visible || all)
- return THE_NON_VALUE;
- opts |= ERTS_NODES_MON_OPT_TYPE_HIDDEN;
- hidden = 1;
- break;
- case am_all:
- if (visible || hidden)
- return THE_NON_VALUE;
- opts |= ERTS_NODES_MON_OPT_TYPES;
- all = 1;
- break;
- default:
- return THE_NON_VALUE;
- }
- break;
- default:
- return THE_NON_VALUE;
- }
- }
- else {
- return THE_NON_VALUE;
- }
- }
-
- if (is_not_nil(opts_list))
- return THE_NON_VALUE;
+static void
+nodes_monitor_info(ErtsMonitor *mon, void *vctxt)
+{
+ ErtsMonitorDataExtended *mdep;
+ ErtsNodesMonitorInfoContext *ctxt = vctxt;
+ Uint no, i, opts, *szp;
+ Eterm **hpp, res;
+
+ hpp = ctxt->hpp;
+ szp = ctxt->szp;
+ res = ctxt->res;
+
+ ASSERT(erts_monitor_is_target(mon));
+ ASSERT(mon->type == ERTS_MON_TYPE_NODES);
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ no = mdep->u.refc;
+
+ ASSERT(is_small(mdep->md.origin.other.item));
+ opts = (Uint) signed_val(mdep->md.origin.other.item);
+
+ for (i = 0; i < no; i++) {
+ Eterm olist = NIL;
+ if (opts & ERTS_NODES_MON_OPT_TYPES) {
+ Eterm type;
+ switch (opts & ERTS_NODES_MON_OPT_TYPES) {
+ case ERTS_NODES_MON_OPT_TYPES: type = am_all; break;
+ case ERTS_NODES_MON_OPT_TYPE_VISIBLE: type = am_visible; break;
+ case ERTS_NODES_MON_OPT_TYPE_HIDDEN: type = am_hidden; break;
+ default: erts_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
+ }
+ olist = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 2,
+ am_node_type,
+ type),
+ olist);
+ }
+ if (opts & ERTS_NODES_MON_OPT_DOWN_REASON)
+ olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist);
+ res = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 2,
+ mon->other.item,
+ olist),
+ res);
}
- erts_smp_mtx_lock(&nodes_monitors_mtx);
-
- if (on == am_true)
- res = insert_nodes_monitor(c_p, opts);
- else
- res = remove_nodes_monitors(c_p, opts, 0);
-
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
-
- return res;
+ ctxt->hpp = hpp;
+ ctxt->szp = szp;
+ ctxt->res = res;
}
-/*
- * Note, this function is only used for debuging.
- */
-
Eterm
erts_processes_monitoring_nodes(Process *c_p)
{
- ErtsNodesMonitor *nmp;
- Eterm res;
+ /*
+ * Note, this function is only used for debugging.
+ */
+ ErtsNodesMonitorInfoContext ctxt;
Eterm *hp;
- Eterm **hpp;
Uint sz;
- Uint *szp;
#ifdef DEBUG
Eterm *hend;
#endif
ASSERT(c_p);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
- erts_smp_mtx_lock(&nodes_monitors_mtx);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+
+ erts_mtx_lock(&nodes_monitors_mtx);
sz = 0;
- szp = &sz;
- hpp = NULL;
+ ctxt.szp = &sz;
+ ctxt.hpp = NULL;
- bld_result:
- res = NIL;
-
- for (nmp = nodes_monitors_end; nmp; nmp = nmp->prev) {
- Uint16 i;
- for (i = 0; i < nmp->no; i++) {
- Eterm olist = NIL;
- if (nmp->opts & ERTS_NODES_MON_OPT_TYPES) {
- Eterm type;
- switch (nmp->opts & ERTS_NODES_MON_OPT_TYPES) {
- case ERTS_NODES_MON_OPT_TYPES: type = am_all; break;
- case ERTS_NODES_MON_OPT_TYPE_VISIBLE: type = am_visible; break;
- case ERTS_NODES_MON_OPT_TYPE_HIDDEN: type = am_hidden; break;
- default: erts_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
- }
- olist = erts_bld_cons(hpp, szp,
- erts_bld_tuple(hpp, szp, 2,
- am_node_type,
- type),
- olist);
- }
- if (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)
- olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist);
- res = erts_bld_cons(hpp, szp,
- erts_bld_tuple(hpp, szp, 2,
- nmp->proc->common.id,
- olist),
- res);
- }
- }
+ while (1) {
+ ctxt.res = NIL;
+
+ erts_monitor_list_foreach(nodes_monitors,
+ nodes_monitor_info,
+ (void *) &ctxt);
+
+ if (ctxt.hpp)
+ break;
- if (!hpp) {
hp = HAlloc(c_p, sz);
#ifdef DEBUG
hend = hp + sz;
#endif
- hpp = &hp;
- szp = NULL;
- goto bld_result;
+ ctxt.hpp = &hp;
+ ctxt.szp = NULL;
}
ASSERT(hp == hend);
- erts_smp_mtx_unlock(&nodes_monitors_mtx);
+ erts_mtx_unlock(&nodes_monitors_mtx);
- return res;
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return ctxt.res;
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index e82b416286..d4d7874a70 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,10 +40,53 @@
#define DFLAG_UNICODE_IO 0x1000
#define DFLAG_DIST_HDR_ATOM_CACHE 0x2000
#define DFLAG_SMALL_ATOM_TAGS 0x4000
-#define DFLAG_INTERNAL_TAGS 0x8000
+#define DFLAG_INTERNAL_TAGS 0x8000 /* used by ETS 'compressed' option */
#define DFLAG_UTF8_ATOMS 0x10000
#define DFLAG_MAP_TAG 0x20000
#define DFLAG_BIG_CREATION 0x40000
+#define DFLAG_SEND_SENDER 0x80000
+#define DFLAG_BIG_SEQTRACE_LABELS 0x100000
+#define DFLAG_NO_MAGIC 0x200000 /* internal for pending connection */
+
+/* Mandatory flags for distribution */
+#define DFLAG_DIST_MANDATORY (DFLAG_EXTENDED_REFERENCES \
+ | DFLAG_EXTENDED_PIDS_PORTS \
+ | DFLAG_UTF8_ATOMS \
+ | DFLAG_NEW_FUN_TAGS)
+
+/*
+ * Additional optimistic flags when encoding toward a pending connection.
+ * If the remote node (erl_interface) does not support these, we may need
+ * to transcode messages enqueued before connection setup has finished.
+ */
+#define DFLAG_DIST_HOPEFULLY (DFLAG_EXPORT_PTR_TAG \
+ | DFLAG_BIT_BINARIES \
+ | DFLAG_DIST_MONITOR \
+ | DFLAG_DIST_MONITOR_NAME)
+
+/* Our preferred set of flags. Used for connection setup handshake */
+#define DFLAG_DIST_DEFAULT (DFLAG_DIST_MANDATORY | DFLAG_DIST_HOPEFULLY \
+ | DFLAG_FUN_TAGS \
+ | DFLAG_NEW_FLOATS \
+ | DFLAG_UNICODE_IO \
+ | DFLAG_DIST_HDR_ATOM_CACHE \
+ | DFLAG_SMALL_ATOM_TAGS \
+ | DFLAG_UTF8_ATOMS \
+ | DFLAG_MAP_TAG \
+ | DFLAG_BIG_CREATION \
+ | DFLAG_SEND_SENDER \
+ | DFLAG_BIG_SEQTRACE_LABELS)
+
+/* Flags addable by local distr implementations */
+#define DFLAG_DIST_ADDABLE DFLAG_DIST_DEFAULT
+
+/* Flags rejectable by local distr implementations */
+#define DFLAG_DIST_REJECTABLE (DFLAG_DIST_HDR_ATOM_CACHE \
+ | DFLAG_HIDDEN_ATOM_CACHE \
+ | DFLAG_ATOM_CACHE)
+
+/* Flags for all features needing strict order delivery */
+#define DFLAG_DIST_STRICT_ORDER DFLAG_DIST_HDR_ATOM_CACHE
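
A connection offer can thus be validated with a single mask test; the check in erts_internal_create_dist_channel_4 boils down to the following sketch:

    static int has_all_mandatory_dflags(Uint flags)
    {
        /* Every bit of DFLAG_DIST_MANDATORY must be set in 'flags'. */
        return (~flags & DFLAG_DIST_MANDATORY) == 0;
    }
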
/* All flags that should be enabled when term_to_binary/1 is used. */
#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
@@ -74,53 +117,38 @@
#define DOP_DEMONITOR_P 20
#define DOP_MONITOR_P_EXIT 21
+#define DOP_SEND_SENDER 22
+#define DOP_SEND_SENDER_TT 23
+
/* distribution trap functions */
-extern Export* dsend2_trap;
-extern Export* dsend3_trap;
-extern Export* dlink_trap;
-extern Export* dunlink_trap;
extern Export* dmonitor_node_trap;
-extern Export* dgroup_leader_trap;
-extern Export* dexit_trap;
-extern Export* dmonitor_p_trap;
typedef enum {
ERTS_DSP_NO_LOCK,
- ERTS_DSP_RLOCK,
- ERTS_DSP_RWLOCK
+ ERTS_DSP_RLOCK
} ErtsDSigPrepLock;
typedef struct {
Process *proc;
DistEntry *dep;
+ Eterm node; /* used if dep == NULL */
Eterm cid;
Eterm connection_id;
int no_suspend;
+ Uint32 flags;
} ErtsDSigData;
-#define ERTS_DE_IS_NOT_CONNECTED(DEP) \
- (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \
- || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \
- (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING)))
-
-#define ERTS_DE_IS_CONNECTED(DEP) \
- (!ERTS_DE_IS_NOT_CONNECTED((DEP)))
-
#define ERTS_DE_BUSY_LIMIT (1024*1024)
extern int erts_dist_buf_busy_limit;
extern int erts_is_alive;
/*
* erts_dsig_prepare() prepares a send of a distributed signal.
- * One of the values defined below are returned. If the returned
- * value is another than ERTS_DSIG_PREP_CONNECTED, the
- * distributed signal cannot be sent before apropriate actions
- * have been taken. Apropriate actions would typically be setting
- * up the connection.
+ * One of the values defined below is returned.
*/
-/* Connected; signal can be sent. */
+/* Connected; signals can be enqueued and sent. */
#define ERTS_DSIG_PREP_CONNECTED 0
/* Not connected; connection needs to be set up. */
#define ERTS_DSIG_PREP_NOT_CONNECTED 1
@@ -128,63 +156,94 @@ extern int erts_is_alive;
#define ERTS_DSIG_PREP_WOULD_SUSPEND 2
/* System not alive (distributed) */
#define ERTS_DSIG_PREP_NOT_ALIVE 3
+/* Pending connection; signals can be enqueued */
+#define ERTS_DSIG_PREP_PENDING 4
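
A hypothetical caller, sketched against the prototype below (argument values are illustrative), showing that the new PENDING result is handled like CONNECTED for enqueueing purposes:

    /* Sketch: prepare a dist signal, auto-connecting if needed. */
    ErtsDSigData dsd;
    switch (erts_dsig_prepare(&dsd, dep, proc, ERTS_PROC_LOCK_MAIN,
                              ERTS_DSP_NO_LOCK, 0 /* may suspend */,
                              1 /* connect */)) {
    case ERTS_DSIG_PREP_CONNECTED:
    case ERTS_DSIG_PREP_PENDING:        /* enqueue; flushed after setup */
        /* ... send via dsd ... */
        break;
    case ERTS_DSIG_PREP_WOULD_SUSPEND:  /* only when no_suspend != 0 */
    case ERTS_DSIG_PREP_NOT_CONNECTED:
    case ERTS_DSIG_PREP_NOT_ALIVE:
        /* ... error path ... */
        break;
    }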
ERTS_GLB_INLINE int erts_dsig_prepare(ErtsDSigData *,
- DistEntry *,
+ DistEntry*,
Process *,
+ ErtsProcLocks,
ErtsDSigPrepLock,
+ int,
int);
ERTS_GLB_INLINE
void erts_schedule_dist_command(Port *, DistEntry *);
+int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE int
erts_dsig_prepare(ErtsDSigData *dsdp,
DistEntry *dep,
Process *proc,
+ ErtsProcLocks proc_locks,
ErtsDSigPrepLock dspl,
- int no_suspend)
+ int no_suspend,
+ int connect)
{
- int failure;
+ int res;
+
if (!erts_is_alive)
return ERTS_DSIG_PREP_NOT_ALIVE;
- if (!dep)
- return ERTS_DSIG_PREP_NOT_CONNECTED;
- if (dspl == ERTS_DSP_RWLOCK)
- erts_smp_de_rwlock(dep);
- else
- erts_smp_de_rlock(dep);
- if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
- failure = ERTS_DSIG_PREP_NOT_CONNECTED;
+ if (!dep) {
+ ASSERT(!connect);
+ return ERTS_DSIG_PREP_NOT_CONNECTED;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (connect) {
+ erts_proc_lc_might_unlock(proc, proc_locks);
+ }
+#endif
+
+retry:
+ erts_de_rlock(dep);
+
+ if (dep->state == ERTS_DE_STATE_CONNECTED) {
+ res = ERTS_DSIG_PREP_CONNECTED;
+ }
+ else if (dep->state == ERTS_DE_STATE_PENDING) {
+ res = ERTS_DSIG_PREP_PENDING;
+ }
+ else if (dep->state == ERTS_DE_STATE_EXITING) {
+ res = ERTS_DSIG_PREP_NOT_CONNECTED;
goto fail;
}
+ else if (connect) {
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
+ erts_de_runlock(dep);
+ if (!erts_auto_connect(dep, proc, proc_locks)) {
+ return ERTS_DSIG_PREP_NOT_ALIVE;
+ }
+ goto retry;
+ }
+ else {
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
+ res = ERTS_DSIG_PREP_NOT_CONNECTED;
+ goto fail;
+ }
+
if (no_suspend) {
- failure = ERTS_DSIG_PREP_CONNECTED;
- erts_smp_mtx_lock(&dep->qlock);
- if (dep->qflgs & ERTS_DE_QFLG_BUSY)
- failure = ERTS_DSIG_PREP_WOULD_SUSPEND;
- erts_smp_mtx_unlock(&dep->qlock);
- if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND)
+ if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) {
+ res = ERTS_DSIG_PREP_WOULD_SUSPEND;
goto fail;
+ }
}
dsdp->proc = proc;
dsdp->dep = dep;
dsdp->cid = dep->cid;
dsdp->connection_id = dep->connection_id;
dsdp->no_suspend = no_suspend;
+ dsdp->flags = dep->flags;
if (dspl == ERTS_DSP_NO_LOCK)
- erts_smp_de_runlock(dep);
- return ERTS_DSIG_PREP_CONNECTED;
+ erts_de_runlock(dep);
+ return res;
fail:
- if (dspl == ERTS_DSP_RWLOCK)
- erts_smp_de_rwunlock(dep);
- else
- erts_smp_de_runlock(dep);
- return failure;
-
+ erts_de_runlock(dep);
+ return res;
}
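
Summarizing the dispatch above as a table (state names and results are exactly those in the code):

    /* DistEntry state          -> erts_dsig_prepare() result
     * ERTS_DE_STATE_CONNECTED  -> ERTS_DSIG_PREP_CONNECTED
     * ERTS_DE_STATE_PENDING    -> ERTS_DSIG_PREP_PENDING
     * ERTS_DE_STATE_EXITING    -> ERTS_DSIG_PREP_NOT_CONNECTED
     * ERTS_DE_STATE_IDLE       -> erts_auto_connect() + retry  (connect set)
     *                             ERTS_DSIG_PREP_NOT_CONNECTED (otherwise)
     */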
ERTS_GLB_INLINE
@@ -194,17 +253,17 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
Eterm id;
if (prt) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT((erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLGS_DEAD) == 0);
- ASSERT(prt->dist_entry);
- dep = prt->dist_entry;
+ dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ ASSERT(dep);
id = prt->common.id;
}
else {
ASSERT(dist_entry);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx));
ASSERT(is_internal_port(dist_entry->cid));
@@ -212,64 +271,19 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
id = dep->cid;
}
- if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
+ if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
}
#endif
-typedef struct {
- ErtsLink *d_lnk;
- ErtsLink *d_sub_lnk;
-} ErtsDistLinkData;
-
-ERTS_GLB_INLINE void erts_remove_dist_link(ErtsDistLinkData *,
- Eterm,
- Eterm,
- DistEntry *);
-ERTS_GLB_INLINE int erts_was_dist_link_removed(ErtsDistLinkData *);
-ERTS_GLB_INLINE void erts_destroy_dist_link(ErtsDistLinkData *);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_remove_dist_link(ErtsDistLinkData *dldp,
- Eterm lid,
- Eterm rid,
- DistEntry *dep)
-{
- erts_smp_de_links_lock(dep);
- dldp->d_lnk = erts_lookup_link(dep->nlinks, lid);
- if (!dldp->d_lnk)
- dldp->d_sub_lnk = NULL;
- else {
- dldp->d_sub_lnk = erts_remove_link(&ERTS_LINK_ROOT(dldp->d_lnk), rid);
- dldp->d_lnk = (ERTS_LINK_ROOT(dldp->d_lnk)
- ? NULL
- : erts_remove_link(&dep->nlinks, lid));
- }
- erts_smp_de_links_unlock(dep);
-}
-
-ERTS_GLB_INLINE int
-erts_was_dist_link_removed(ErtsDistLinkData *dldp)
-{
- return dldp->d_sub_lnk != NULL;
-}
-
-ERTS_GLB_INLINE void
-erts_destroy_dist_link(ErtsDistLinkData *dldp)
-{
- if (dldp->d_lnk)
- erts_destroy_link(dldp->d_lnk);
- if (dldp->d_sub_lnk)
- erts_destroy_link(dldp->d_sub_lnk);
-}
-
+#ifdef DEBUG
+#define ERTS_DBG_CHK_NO_DIST_LNK(D, R, L) \
+ erts_dbg_chk_no_dist_proc_link((D), (R), (L))
+#else
+#define ERTS_DBG_CHK_NO_DIST_LNK(D, R, L)
#endif
-
-
/* Define for testing */
/* #define EXTREME_TTB_TRAPPING 1 */
@@ -290,6 +304,7 @@ typedef struct TTBSizeContext_ {
typedef struct TTBEncodeContext_ {
Uint flags;
+ Uint hopefull_flags;
int level;
byte* ep;
Eterm obj;
@@ -331,7 +346,7 @@ struct erts_dsig_send_context {
Eterm ctl;
Eterm msg;
int force_busy;
- Uint32 pass_through_size;
+ Uint32 max_finalize_prepend;
Uint data_size, dhdr_ext_size;
ErtsAtomCacheMap *acmp;
ErtsDistOutputBuf *obuf;
@@ -345,10 +360,12 @@ struct erts_dsig_send_context {
typedef struct {
int suspend;
+ int connect;
Eterm ctl_heap[6];
ErtsDSigData dsd;
- DistEntry* dep_to_deref;
+ DistEntry *dep;
+ int deref_dep;
struct erts_dsig_send_context dss;
Eterm return_term;
@@ -375,7 +392,7 @@ extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
extern int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx);
-extern void erts_dsend_context_dtor(Binary*);
+extern int erts_dsend_context_dtor(Binary*);
extern Eterm erts_dsend_export_trap_context(Process* p, ErtsSendContext* ctx);
extern int erts_dist_command(Port *prt, int reds);
@@ -384,5 +401,7 @@ extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
extern Uint erts_dist_cache_size(void);
+extern Sint erts_abort_connection_rwunlock(DistEntry *dep);
+
#endif
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
index 6f70d5961e..15ea182976 100644
--- a/erts/emulator/beam/dtrace-wrapper.h
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2016.
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2017.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,7 +74,7 @@
#if defined(_SDT_PROBE) && !defined(STAP_PROBE11)
/* SLF: This is Ubuntu 11-style SystemTap hackery */
-/* work arround for missing STAP macro */
+/* workaround for missing STAP macro */
#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
_SDT_PROBE(provider, name, 11, \
(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index eda3ad870a..38289ea78a 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -54,7 +54,7 @@ static void link_free_block (Allctr_t *, Block_t *);
static void unlink_free_block (Allctr_t *, Block_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *arg, Uint **, Uint *);
static void init_atoms (void);
@@ -181,7 +181,7 @@ static struct {
static void ERTS_INLINE atom_init(Eterm *atom, char *name)
{
- *atom = am_atom_put(name, strlen(name));
+ *atom = am_atom_put(name, sys_strlen(name));
}
#define AM_INIT(AM) atom_init(&am.AM, #AM)
@@ -227,7 +227,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 3c2c9def3b..9e36d5e0d1 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,15 +38,17 @@
#include "erl_db.h"
#include "erl_binary.h"
#include "erl_bits.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "erl_mseg.h"
-#include "erl_monitors.h"
+#include "erl_monitor_link.h"
#include "erl_hl_timer.h"
#include "erl_cpu_topology.h"
#include "erl_thr_queue.h"
+#include "erl_nfunc_sched.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
+#include "erl_bif_unique.h"
#define GET_ERL_GF_ALLOC_IMPL
#include "erl_goodfit_alloc.h"
@@ -81,14 +83,6 @@
#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
-#ifndef ERTS_SMP
-# undef ERTS_ALC_DEFAULT_ACUL
-# define ERTS_ALC_DEFAULT_ACUL 0
-# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0
-# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0
-#endif
#ifdef DEBUG
static Uint install_debug_functions(void);
@@ -120,7 +114,7 @@ typedef union {
char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
AOFFAllctr_t aoffa;
char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
-} ErtsAllocatorState_t;
+} ErtsAllocatorState_t erts_align_attribute(ERTS_CACHE_LINE_SIZE);
static ErtsAllocatorState_t std_alloc_state;
static ErtsAllocatorState_t ll_alloc_state;
@@ -146,13 +140,12 @@ enum {
};
typedef struct {
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
int only_sz;
int internal;
Uint req_sched;
Process *proc;
- Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ ErtsIRefStorage iref;
int allocs[ERTS_ALC_INFO_A_END - ERTS_ALC_A_MIN + 1];
} ErtsAllocInfoReq;
@@ -167,7 +160,7 @@ enum allctr_type {
GOODFIT,
BESTFIT,
AFIT,
- AOFIRSTFIT
+ FIRSTFIT
};
struct au_init {
@@ -209,8 +202,6 @@ typedef struct {
int top_pad;
AlcUInit_t alloc_util;
struct {
- int stat;
- int map;
char *mtrace;
char *nodename;
} instr;
@@ -373,10 +364,9 @@ set_default_exec_alloc_opts(struct au_init *ip)
ip->init.util.rmbcmt = 0;
ip->init.util.acul = 0;
- ip->init.util.mseg_alloc = &erts_alcu_mmapper_mseg_alloc;
- ip->init.util.mseg_realloc = &erts_alcu_mmapper_mseg_realloc;
- ip->init.util.mseg_dealloc = &erts_alcu_mmapper_mseg_dealloc;
- ip->init.util.mseg_mmapper = &erts_exec_mmapper;
+ ip->init.util.mseg_alloc = &erts_alcu_exec_mseg_alloc;
+ ip->init.util.mseg_realloc = &erts_alcu_exec_mseg_realloc;
+ ip->init.util.mseg_dealloc = &erts_alcu_exec_mseg_dealloc;
}
#endif /* ERTS_ALC_A_EXEC */
@@ -386,6 +376,7 @@ set_default_temp_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
ip->thr_spec = 1;
+ ip->disable_allowed = 0;
ip->carrier_migration_allowed = 0;
ip->atype = AFIT;
ip->init.util.name_prefix = "temp_";
@@ -435,6 +426,7 @@ set_default_binary_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_BINARY;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
}
static void
@@ -471,6 +463,7 @@ set_default_driver_alloc_opts(struct au_init *ip)
#endif
ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
}
static void
@@ -500,13 +493,15 @@ set_default_test_alloc_opts(struct au_init *ip)
SET_DEFAULT_ALLOC_OPTS(ip);
ip->enable = 0; /* Disabled by default */
ip->thr_spec = -1 * erts_no_schedulers;
- ip->atype = AOFIRSTFIT;
- ip->init.aoff.flavor = AOFF_BF;
+ ip->atype = FIRSTFIT;
+ ip->init.aoff.crr_order = FF_AOFF;
+ ip->init.aoff.blk_order = FF_BF;
ip->init.util.name_prefix = "test_";
ip->init.util.alloc_no = ERTS_ALC_A_TEST;
ip->init.util.mmbcs = 0; /* Main carrier size */
ip->init.util.ts = ERTS_ALC_MTA_TEST;
ip->init.util.acul = ERTS_ALC_DEFAULT_ACUL;
+ ip->init.util.atags = 1;
/* Use a constant minimal MBC size */
#if ERTS_SA_MB_CARRIERS
@@ -521,7 +516,6 @@ set_default_test_alloc_opts(struct au_init *ip)
}
-#ifdef ERTS_SMP
static void
adjust_tpref(struct au_init *ip, int no_sched)
@@ -544,7 +538,6 @@ adjust_tpref(struct au_init *ip, int no_sched)
}
}
-#endif
static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
@@ -573,7 +566,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
int j;
-#ifdef ERTS_SMP
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
int i;
ErtsAllocatorThrSpec_t* tspec;
@@ -589,7 +581,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
}
}
else
-#endif
{
Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
@@ -603,16 +594,15 @@ static ERTS_INLINE int
strategy_support_carrier_migration(struct au_init *auip)
{
/*
- * Currently only aoff, aoffcbf and aoffcaobf support carrier
+ * Currently only aoff* and ageff* support carrier
* migration, i.e, type AOFIRSTFIT.
*/
- return auip->atype == AOFIRSTFIT;
+ return auip->atype == FIRSTFIT;
}
static ERTS_INLINE void
adjust_carrier_migration_support(struct au_init *auip)
{
-#ifdef ERTS_SMP
if (auip->init.util.acul) {
auip->thr_spec = -1; /* Need thread preferred */
@@ -622,13 +612,11 @@ adjust_carrier_migration_support(struct au_init *auip)
*/
if (!strategy_support_carrier_migration(auip)) {
/* Default to aoffcbf */
- auip->atype = AOFIRSTFIT;
- auip->init.aoff.flavor = AOFF_BF;
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AOFF;
+ auip->init.aoff.blk_order = FF_BF;
}
}
-#else
- auip->init.util.acul = 0;
-#endif
}
void
@@ -649,30 +637,30 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
= sizeof(Process);
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR_SH)]
- = ERTS_MONITOR_SH_SIZE * sizeof(Uint);
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
- = ERTS_LINK_SH_SIZE * sizeof(Uint);
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
- = sizeof(ErtsDrvEventDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR)]
+ = sizeof(ErtsMonitorDataHeap);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LINK)]
+ = sizeof(ErtsLinkData);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
= sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)]
+ = sizeof(ErtsNifSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
= sizeof(ErtsMessageRef);
-#ifdef ERTS_SMP
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
= sizeof(ErtsThrQElement_t);
-#endif
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
= erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
= erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
- = erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
-#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
+ = sizeof(NifExportTrace);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
+ = sizeof(ErtsNSchedMagicRefTableEntry);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MINDIRECTION)]
+ = ERTS_MAGIC_BIN_UNALIGNED_SIZE(sizeof(ErtsMagicIndirectionWord));
#ifdef HARD_DEBUG
hdbg_init();
@@ -723,20 +711,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
}
-#ifndef ERTS_SMP
- init.sl_alloc.thr_spec = 0;
- init.std_alloc.thr_spec = 0;
- init.ll_alloc.thr_spec = 0;
- init.eheap_alloc.thr_spec = 0;
- init.binary_alloc.thr_spec = 0;
- init.ets_alloc.thr_spec = 0;
- init.driver_alloc.thr_spec = 0;
- init.fix_alloc.thr_spec = 0;
- init.literal_alloc.thr_spec = 0;
-#ifdef ERTS_ALC_A_EXEC
- init.exec_alloc.thr_spec = 0;
-#endif
-#endif
/* Make adjustments for carrier migration support */
init.temp_alloc.init.util.acul = 0;
@@ -787,7 +761,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
}
-#ifdef ERTS_SMP
/* Only temp_alloc can use thread specific interface */
if (init.temp_alloc.thr_spec)
init.temp_alloc.thr_spec = erts_no_schedulers;
@@ -806,10 +779,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_tpref(&init.exec_alloc, erts_no_schedulers);
#endif
-#else
- /* No thread specific if not smp */
- init.temp_alloc.thr_spec = 0;
-#endif
/*
* The following allocators cannot be run with afit strategy.
@@ -828,10 +797,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
refuse_af_strategy(&init.exec_alloc);
#endif
-#ifdef ERTS_SMP
if (!init.temp_alloc.thr_spec)
refuse_af_strategy(&init.temp_alloc);
-#endif
erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
@@ -940,7 +907,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
&test_alloc_state);
erts_mtrace_install_wrapper_functions();
- extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
init_aireq_alloc();
@@ -995,8 +961,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
return;
}
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
if (init->thr_spec) {
if (init->thr_spec > 0) {
af->alloc = erts_alcu_alloc_thr_spec;
@@ -1026,7 +990,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
ai->thr_spec = tspec->size;
}
else
-#endif
if (init->init.util.ts) {
af->alloc = erts_alcu_alloc_ts;
if (init->init.util.fix_type_size)
@@ -1038,21 +1001,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
af->free = erts_alcu_free_ts;
}
else
-#endif
{
-#ifdef ERTS_SMP
erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
init->init.util.name_prefix);
-#else
- af->alloc = erts_alcu_alloc;
- if (init->init.util.fix_type_size)
- af->realloc = erts_realloc_fixed_size;
- else if (init->init.util.ramv)
- af->realloc = erts_alcu_realloc_mv;
- else
- af->realloc = erts_alcu_realloc;
- af->free = erts_alcu_free;
-#endif
}
af->extra = NULL;
ai->alloc_util = 1;
@@ -1176,7 +1127,7 @@ start_au_allocator(ErtsAlcType_t alctr_n,
&init->init.af,
&init->init.util);
break;
- case AOFIRSTFIT:
+ case FIRSTFIT:
as = erts_aoffalc_start((AOFFAllctr_t *) as0,
&init->init.aoff,
&init->init.util);
@@ -1261,31 +1212,41 @@ get_bool_value(char *param_end, char** argv, int* ip)
{
char *param = argv[*ip]+1;
char *value = get_value(param_end, argv, ip);
- if (strcmp(value, "true") == 0)
+ if (sys_strcmp(value, "true") == 0)
return 1;
- else if (strcmp(value, "false") == 0)
+ else if (sys_strcmp(value, "false") == 0)
return 0;
else
bad_value(param, param_end, value);
return -1;
}
+static Uint kb_to_bytes(Sint kb, Uint *bytes)
+{
+ const Uint max = ((~((Uint) 0))/1024) + 1;
+
+ if (kb < 0 || (Uint)kb > max)
+ return 0;
+ if ((Uint)kb == max)
+ *bytes = ~((Uint) 0);
+ else
+ *bytes = ((Uint) kb)*1024;
+ return 1;
+}
+
static Uint
get_kb_value(char *param_end, char** argv, int* ip)
{
Sint tmp;
- Uint max = ((~((Uint) 0))/1024) + 1;
+ Uint bytes = 0;
char *rest;
char *param = argv[*ip]+1;
char *value = get_value(param_end, argv, ip);
errno = 0;
tmp = (Sint) ErtsStrToSint(value, &rest, 10);
- if (errno != 0 || rest == value || tmp < 0 || max < ((Uint) tmp))
+ if (errno != 0 || rest == value || !kb_to_bytes(tmp, &bytes))
bad_value(param, param_end, value);
- if (max == (Uint) tmp)
- return ~((Uint) 0);
- else
- return ((Uint) tmp)*1024;
+ return bytes;
}
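
The extracted kb_to_bytes() helper keeps the old clamping behaviour: the maximum representable KB count maps to an all-ones byte count instead of overflowing. A small check of that contract (assuming a 32-bit Uint for concreteness):

    /* Sketch: expected kb_to_bytes() behaviour with a 32-bit Uint. */
    Uint bytes;
    assert(kb_to_bytes(1, &bytes) && bytes == 1024);
    assert(kb_to_bytes(4194304, &bytes)      /* max = (2^32 - 1)/1024 + 1 */
           && bytes == ~((Uint) 0));         /* clamped, no overflow */
    assert(!kb_to_bytes(-1, &bytes));        /* negative values rejected */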
static UWord
@@ -1372,55 +1333,87 @@ handle_au_arg(struct au_init *auip,
switch (sub_param[0]) {
case 'a':
- if (has_prefix("acul", sub_param)) {
- if (!auip->carrier_migration_allowed) {
- if (!u_switch)
- goto bad_switch;
- else {
- /* ignore */
- (void) get_acul_value(auip, sub_param + 4, argv, ip);
- break;
- }
- }
- auip->init.util.acul = get_acul_value(auip, sub_param + 4, argv, ip);
- }
+ if (sub_param[1] == 'c') { /* Migration parameters "ac*" */
+ UWord value;
+ UWord* wp;
+ if (!auip->carrier_migration_allowed && !u_switch)
+ goto bad_switch;
+
+ if (has_prefix("acul", sub_param)) {
+ value = get_acul_value(auip, sub_param + 4, argv, ip);
+ wp = &auip->init.util.acul;
+ }
+ else if (has_prefix("acnl", sub_param)) {
+ value = get_amount_value(sub_param + 4, argv, ip);
+ wp = &auip->init.util.acnl;
+ }
+ else if (has_prefix("acfml", sub_param)) {
+ value = get_amount_value(sub_param + 5, argv, ip);
+ wp = &auip->init.util.acfml;
+ }
+ else
+ goto bad_switch;
+
+ if (auip->carrier_migration_allowed)
+ *wp = value;
+ }
else if(has_prefix("asbcst", sub_param)) {
auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
}
else if(has_prefix("as", sub_param)) {
char *alg = get_value(sub_param + 2, argv, ip);
- if (strcmp("bf", alg) == 0) {
+ if (sys_strcmp("bf", alg) == 0) {
auip->atype = BESTFIT;
auip->init.bf.ao = 0;
}
- else if (strcmp("aobf", alg) == 0) {
+ else if (sys_strcmp("aobf", alg) == 0) {
auip->atype = BESTFIT;
auip->init.bf.ao = 1;
}
- else if (strcmp("gf", alg) == 0) {
+ else if (sys_strcmp("gf", alg) == 0) {
auip->atype = GOODFIT;
}
- else if (strcmp("af", alg) == 0) {
+ else if (sys_strcmp("af", alg) == 0) {
auip->atype = AFIT;
}
- else if (strcmp("aoff", alg) == 0) {
- auip->atype = AOFIRSTFIT;
- auip->init.aoff.flavor = AOFF_AOFF;
+ else if (sys_strcmp("aoff", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AOFF;
+ auip->init.aoff.blk_order = FF_AOFF;
}
- else if (strcmp("aoffcbf", alg) == 0) {
- auip->atype = AOFIRSTFIT;
- auip->init.aoff.flavor = AOFF_BF;
+ else if (sys_strcmp("aoffcbf", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AOFF;
+ auip->init.aoff.blk_order = FF_BF;
}
- else if (strcmp("aoffcaobf", alg) == 0) {
- auip->atype = AOFIRSTFIT;
- auip->init.aoff.flavor = AOFF_AOBF;
+ else if (sys_strcmp("aoffcaobf", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AOFF;
+ auip->init.aoff.blk_order = FF_AOBF;
}
+ else if (sys_strcmp("ageffcaoff", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AGEFF;
+ auip->init.aoff.blk_order = FF_AOFF;
+ }
+ else if (sys_strcmp("ageffcbf", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AGEFF;
+ auip->init.aoff.blk_order = FF_BF;
+ }
+ else if (sys_strcmp("ageffcaobf", alg) == 0) {
+ auip->atype = FIRSTFIT;
+ auip->init.aoff.crr_order = FF_AGEFF;
+ auip->init.aoff.blk_order = FF_AOBF;
+ }
else {
bad_value(param, sub_param + 1, alg);
}
if (!strategy_support_carrier_migration(auip))
auip->init.util.acul = 0;
- }
+ } else if (has_prefix("atags", sub_param)) {
+ auip->init.util.atags = get_bool_value(sub_param + 5, argv, ip);
+ }
else
goto bad_switch;
break;
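
The renamed strategies decompose the old single "flavor" into a carrier order plus a block order. The mapping implemented by the parser above, collected in one place:

    /* +M<S>as <alg> -> crr_order / blk_order
     * aoff          -> FF_AOFF   / FF_AOFF
     * aoffcbf       -> FF_AOFF   / FF_BF
     * aoffcaobf     -> FF_AOFF   / FF_AOBF
     * ageffcaoff    -> FF_AGEFF  / FF_AOFF
     * ageffcbf      -> FF_AGEFF  / FF_BF
     * ageffcaobf    -> FF_AGEFF  / FF_AOBF
     */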
@@ -1537,8 +1530,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
&init->ll_alloc,
&init->driver_alloc,
&init->fix_alloc,
- &init->sl_alloc,
- &init->temp_alloc
+ &init->sl_alloc
/* test_alloc not affected by +Mea??? or +Mu??? */
};
int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
@@ -1571,10 +1563,8 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
break;
case 'X':
if (has_prefix("scs", argv[i]+3)) {
-#ifdef ERTS_ALC_A_EXEC
- init->mseg.exec_mmap.scs =
-#endif
- get_mb_value(argv[i]+6, argv, &i);
+ /* Ignore obsolete */
+ (void) get_mb_value(argv[i]+6, argv, &i);
}
else
handle_au_arg(&init->exec_alloc, &argv[i][3], argv, &i, 0);
@@ -1691,7 +1681,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
}
else if (has_prefix("e", param+2)) {
arg = get_value(param+3, argv, &i);
- if (strcmp("true", arg) != 0)
+ if (sys_strcmp("true", arg) != 0)
bad_value(param, param+3, arg);
}
else
@@ -1703,20 +1693,20 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'a': {
int a;
arg = get_value(argv[i]+4, argv, &i);
- if (strcmp("min", arg) == 0) {
+ if (sys_strcmp("min", arg) == 0) {
for (a = 0; a < aui_sz; a++)
aui[a]->enable = 0;
}
- else if (strcmp("max", arg) == 0) {
+ else if (sys_strcmp("max", arg) == 0) {
for (a = 0; a < aui_sz; a++)
aui[a]->enable = 1;
}
- else if (strcmp("config", arg) == 0) {
+ else if (sys_strcmp("config", arg) == 0) {
init->erts_alloc_config = 1;
}
- else if (strcmp("r9c", arg) == 0
- || strcmp("r10b", arg) == 0
- || strcmp("r11b", arg) == 0) {
+ else if (sys_strcmp("r9c", arg) == 0
+ || sys_strcmp("r10b", arg) == 0
+ || sys_strcmp("r11b", arg) == 0) {
set_default_sl_alloc_opts(&init->sl_alloc);
set_default_std_alloc_opts(&init->std_alloc);
set_default_ll_alloc_opts(&init->ll_alloc);
@@ -1728,7 +1718,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
set_default_driver_alloc_opts(&init->fix_alloc);
init->driver_alloc.enable = 0;
- if (strcmp("r9c", arg) == 0) {
+ if (sys_strcmp("r9c", arg) == 0) {
init->sl_alloc.enable = 0;
init->std_alloc.enable = 0;
init->binary_alloc.enable = 0;
@@ -1753,24 +1743,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
break;
case 'i':
switch (argv[i][3]) {
- case 's':
- arg = get_value(argv[i]+4, argv, &i);
- if (strcmp("true", arg) == 0)
- init->instr.stat = 1;
- else if (strcmp("false", arg) == 0)
- init->instr.stat = 0;
- else
- bad_value(param, param+3, arg);
- break;
- case 'm':
- arg = get_value(argv[i]+4, argv, &i);
- if (strcmp("true", arg) == 0)
- init->instr.map = 1;
- else if (strcmp("false", arg) == 0)
- init->instr.map = 0;
- else
- bad_value(param, param+3, arg);
- break;
case 't':
init->instr.mtrace = get_value(argv[i]+4, argv, &i);
break;
@@ -1781,9 +1753,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'l':
if (has_prefix("pm", param+2)) {
arg = get_value(argv[i]+5, argv, &i);
- if (strcmp("all", arg) == 0)
+ if (sys_strcmp("all", arg) == 0)
lock_all_physical_memory = 1;
- else if (strcmp("no", arg) == 0)
+ else if (sys_strcmp("no", arg) == 0)
lock_all_physical_memory = 0;
else
bad_value(param, param+4, arg);
@@ -1829,12 +1801,10 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case '-':
if (argv[i][2] == '\0') {
/* End of system flags reached */
- if (init->instr.mtrace
- /* || init->instr.stat
- || init->instr.map */) {
+ if (init->instr.mtrace) {
while (i < *argc) {
- if(strcmp(argv[i], "-sname") == 0
- || strcmp(argv[i], "-name") == 0) {
+ if(sys_strcmp(argv[i], "-sname") == 0
+ || sys_strcmp(argv[i], "-name") == 0) {
if (i + 1 <*argc) {
init->instr.nodename = argv[i+1];
break;
@@ -1884,9 +1854,7 @@ erts_alloc_register_scheduler(void *vesdp)
int ix = (int) esdp->no;
int aix;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
esdp->alloc_data.deallctr[aix] = NULL;
@@ -1904,7 +1872,6 @@ erts_alloc_register_scheduler(void *vesdp)
}
}
-#ifdef ERTS_SMP
void
erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
@@ -1933,12 +1900,10 @@ erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
}
}
}
-#endif
erts_aint32_t
erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
{
-#ifdef ERTS_SMP
ErtsAllocatorThrSpec_t *tspec;
tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
@@ -1946,11 +1911,6 @@ erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
return erts_alcu_fix_alloc_shrink(
erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#else
- if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
- return erts_alcu_fix_alloc_shrink(
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#endif
return 0;
}
@@ -2113,13 +2073,13 @@ add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
}
Eterm
-erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
+erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
/*
* NOTE! When updating this function, make sure to also update
* erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
*/
-#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
+#define ERTS_MEM_NEED_ALL_ALCU (want_tot_or_sys)
struct {
int total;
int processes;
@@ -2130,7 +2090,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
int binary;
int code;
int ets;
- int maximum;
} want = {0};
struct {
UWord total;
@@ -2142,7 +2101,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
UWord binary;
UWord code;
UWord ets;
- UWord maximum;
} size = {0};
Eterm atoms[sizeof(size)/sizeof(UWord)];
UWord *uintps[sizeof(size)/sizeof(UWord)];
@@ -2154,7 +2112,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
int only_one_value = 0;
ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
/* Figure out whats wanted... */
@@ -2195,12 +2153,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
want.ets = 1;
atoms[length] = am_ets;
uintps[length++] = &size.ets;
-
- want.maximum = erts_instr_stat;
- if (want.maximum) {
- atoms[length] = am_maximum;
- uintps[length++] = &size.maximum;
- }
}
else {
DeclareTmpHeapNoproc(tmp_heap,2);
@@ -2282,18 +2234,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
uintps[length++] = &size.ets;
}
break;
- case am_maximum:
- if (erts_instr_stat) {
- if (!want.maximum) {
- want.maximum = 1;
- atoms[length] = am_maximum;
- uintps[length++] = &size.maximum;
- }
- } else {
- UnUseTmpHeapNoproc(2);
- return am_badarg;
- }
- break;
default:
UnUseTmpHeapNoproc(2);
return am_badarg;
@@ -2327,10 +2267,10 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (proc) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
== erts_proc_lc_my_proc_locks(proc));
/* We'll need locks early in the lock order */
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
}
/* Calculate values needed... */
@@ -2392,7 +2332,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
}
tmp += erts_ptab_mem_size(&erts_proc);
tmp += erts_bif_timer_memory_size();
- tmp += erts_tot_link_lh_size();
size.processes = size.processes_used = tmp;
@@ -2403,12 +2342,11 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
add_fix_values(&size.processes,
&size.processes_used,
fi,
- ERTS_ALC_T_MONITOR_SH);
-
+ ERTS_ALC_T_MONITOR);
add_fix_values(&size.processes,
&size.processes_used,
fi,
- ERTS_ALC_T_NLINK_SH);
+ ERTS_ALC_T_LINK);
add_fix_values(&size.processes,
&size.processes_used,
fi,
@@ -2425,12 +2363,10 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
add_fix_values(&size.processes,
&size.processes_used,
fi,
- ERTS_ALC_T_ABIF_TIMER);
-#endif
+ ERTS_ALC_T_NIF_EXP_TRACE);
}
if (want.atom || want.atom_used) {
@@ -2463,20 +2399,13 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
size.ets += erts_get_ets_misc_mem_size();
}
- if (erts_instr_stat && (want_tot_or_sys || want.maximum)) {
- if (want_tot_or_sys) {
- size.total = erts_instr_get_total();
- size.system = size.total - size.processes;
- }
- size.maximum = erts_instr_get_max_total();
- }
- else if (want_tot_or_sys) {
+ if (want_tot_or_sys) {
size.system = size.total - size.processes;
}
if (print_to_p) {
int i;
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
/* Print result... */
@@ -2490,7 +2419,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
Uint *hp;
Uint hsz;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
if (only_one_value) {
ASSERT(length == 1);
@@ -2530,7 +2459,7 @@ struct aa_values {
};
Eterm
-erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
+erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc)
{
#define MAX_AA_VALUES (24)
struct aa_values values[MAX_AA_VALUES];
@@ -2539,27 +2468,15 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Uint reserved_atom_space, atom_space;
if (proc) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
== erts_proc_lc_my_proc_locks(proc));
/* We'll need locks early in the lock order */
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
}
i = 0;
- if (erts_instr_stat) {
- values[i].arity = 2;
- values[i].name = "total";
- values[i].ui[0] = erts_instr_get_total();
- i++;
-
- values[i].arity = 2;
- values[i].name = "maximum";
- values[i].ui[0] = erts_instr_get_max_total();
- i++;
- }
-
values[i].arity = 2;
values[i].name = "sys_misc";
values[i].ui[0] = erts_sys_misc_mem_sz();
@@ -2641,11 +2558,6 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
i++;
values[i].arity = 2;
- values[i].name = "link_lh";
- values[i].ui[0] = erts_tot_link_lh_size();
- i++;
-
- values[i].arity = 2;
values[i].name = "process_table";
values[i].ui[0] = erts_ptab_mem_size(&erts_proc);
i++;
@@ -2665,7 +2577,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
if (print_to_p) {
/* Print result... */
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to, arg, "=allocated_areas\n");
@@ -2695,7 +2607,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Uint hsz;
Uint *hszp;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
hpp = NULL;
hsz = 0;
@@ -2707,7 +2619,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Eterm atom;
if (hpp)
atom = am_atom_put(values[i].name,
- (int) strlen(values[i].name));
+ (int) sys_strlen(values[i].name));
else
atom = am_true;
@@ -2779,11 +2691,11 @@ erts_alloc_util_allocators(void *proc)
}
void
-erts_allocator_info(int to, void *arg)
+erts_allocator_info(fmtfn_t to, void *arg)
{
ErtsAlcType_t a;
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
int ai;
@@ -2836,11 +2748,7 @@ erts_allocator_info(int to, void *arg)
#if HAVE_ERTS_MSEG
{
struct erts_mmap_info_struct emis;
-#ifdef ERTS_SMP
int max = (int) erts_no_schedulers;
-#else
- int max = 0;
-#endif
int i;
for (i = 0; i <= max; i++) {
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
@@ -2852,10 +2760,6 @@ erts_allocator_info(int to, void *arg)
erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
#endif
-#ifdef ERTS_ALC_A_EXEC
- erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n");
- erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis);
-#endif
}
#endif
@@ -2863,10 +2767,7 @@ erts_allocator_info(int to, void *arg)
erts_alcu_au_info_options(&to, arg, NULL, NULL);
erts_print(to, arg, "=allocator:instr\n");
- erts_print(to, arg, "option m: %s\n",
- erts_instr_memory_map ? "true" : "false");
- erts_print(to, arg, "option s: %s\n",
- erts_instr_stat ? "true" : "false");
+
erts_print(to, arg, "option t: %s\n",
erts_mtrace_enabled ? "true" : "false");
@@ -2902,7 +2803,7 @@ erts_allocator_options(void *proc)
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
Eterm tmp = NIL;
atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
- strlen(ERTS_ALC_A2AD(a)));
+ sys_strlen(ERTS_ALC_A2AD(a)));
if (erts_allctrs_info[a].enabled) {
if (erts_allctrs_info[a].alloc_util) {
Allctr_t *allctr;
@@ -2920,20 +2821,20 @@ erts_allocator_options(void *proc)
Eterm as[4];
Eterm ts[4];
- as[l] = am_atom_put("e", 1);
+ as[l] = ERTS_MAKE_AM("e");
ts[l++] = am_true;
switch (a) {
case ERTS_ALC_A_SYSTEM:
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("libc", 4);
+ as[l] = ERTS_MAKE_AM("m");
+ ts[l++] = ERTS_MAKE_AM("libc");
if(sas.trim_threshold >= 0) {
- as[l] = am_atom_put("tt", 2);
+ as[l] = ERTS_MAKE_AM("tt");
ts[l++] = erts_bld_uint(hpp, szp,
(Uint) sas.trim_threshold);
}
if(sas.top_pad >= 0) {
- as[l] = am_atom_put("tp", 2);
+ as[l] = ERTS_MAKE_AM("tp");
ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
}
break;
@@ -2947,7 +2848,7 @@ erts_allocator_options(void *proc)
}
else {
- Eterm atom = am_atom_put("e", 1);
+ Eterm atom = ERTS_MAKE_AM("e");
Eterm term = am_false;
tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
}
@@ -2958,12 +2859,12 @@ erts_allocator_options(void *proc)
#if HAVE_ERTS_MSEG
if (use_mseg) {
- atoms[length] = am_atom_put("mseg_alloc", 10);
+ atoms[length] = ERTS_MAKE_AM("mseg_alloc");
terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
}
#endif
- atoms[length] = am_atom_put("alloc_util", 10);
+ atoms[length] = ERTS_MAKE_AM("alloc_util");
terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
#if HAVE_ERTS_MMAP
@@ -2972,22 +2873,16 @@ erts_allocator_options(void *proc)
NULL, hpp, szp);
#endif
{
- Eterm o[3], v[3];
- o[0] = am_atom_put("m", 1);
- v[0] = erts_instr_memory_map ? am_true : am_false;
- o[1] = am_atom_put("s", 1);
- v[1] = erts_instr_stat ? am_true : am_false;
- o[2] = am_atom_put("t", 1);
- v[2] = erts_mtrace_enabled ? am_true : am_false;
-
- atoms[length] = am_atom_put("instr", 5);
- terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
+ Eterm o[1], v[1];
+ o[0] = ERTS_MAKE_AM("t");
+ v[0] = erts_mtrace_enabled ? am_true : am_false;
+
+ atoms[length] = ERTS_MAKE_AM("instr");
+ terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
}
- atoms[length] = am_atom_put("lock_physical_memory", 20);
- terms[length++] = (lock_all_physical_memory
- ? am_atom_put("all", 3)
- : am_atom_put("no", 2));
+ atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
+ terms[length++] = (lock_all_physical_memory ? am_all : am_no);
settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
@@ -2996,28 +2891,25 @@ erts_allocator_options(void *proc)
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
if (erts_allctrs_info[a].enabled) {
terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
- strlen(ERTS_ALC_A2AD(a)));
+ sys_strlen(ERTS_ALC_A2AD(a)));
}
}
#if HAVE_ERTS_MSEG
if (use_mseg)
- terms[length++] = am_atom_put("mseg_alloc", 10);
+ terms[length++] = ERTS_MAKE_AM("mseg_alloc");
#endif
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
- terms[length++] = am_atom_put("sys_aligned_alloc", 17);
+ terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
#endif
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
terms[length++] = ERTS_MAKE_AM("literal_mmap");
#endif
-#ifdef ERTS_ALC_A_EXEC
- terms[length++] = ERTS_MAKE_AM("exec_mmap");
-#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
#if defined(__GLIBC__)
{
- Eterm AM_glibc = am_atom_put("glibc", 5);
+ Eterm AM_glibc = ERTS_MAKE_AM("glibc");
Eterm version;
version = erts_bld_cons(hpp,
@@ -3102,15 +2994,12 @@ reply_alloc_info(void *vair)
# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
struct erts_mmap_info_struct mmap_info_literal;
# endif
-# ifdef ERTS_ALC_A_EXEC
- struct erts_mmap_info_struct mmap_info_exec;
-# endif
#endif
int i;
Eterm (*info_func)(Allctr_t *,
int,
int,
- int *,
+ fmtfn_t *,
void *,
Uint **,
Uint *) = (air->only_sz
@@ -3126,9 +3015,10 @@ reply_alloc_info(void *vair)
while (1) {
if (hpp)
- ref_copy = STORE_NC(hpp, ohp, air->ref);
+ ref_copy = erts_iref_storage_make_ref(&air->iref,
+ hpp, ohp, 0);
else
- *szp += REF_THING_SIZE;
+ *szp += erts_iref_storage_heap_size(&air->iref);
ai_list = NIL;
for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
@@ -3232,17 +3122,6 @@ reply_alloc_info(void *vair)
erts_bld_atom(hpp,szp,"literal_mmap"),
ainfo);
# endif
-# ifdef ERTS_ALC_A_EXEC
- ai_list = erts_bld_cons(hpp, szp,
- ainfo, ai_list);
- ainfo = (air->only_sz ? NIL :
- erts_mmap_info(&erts_exec_mmapper, NULL, NULL,
- hpp, szp, &mmap_info_exec));
- ainfo = erts_bld_tuple3(hpp, szp,
- alloc_atom,
- erts_bld_atom(hpp,szp,"exec_mmap"),
- ainfo);
-# endif
#else /* !HAVE_ERTS_MMAP */
ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
am_false);
@@ -3300,7 +3179,7 @@ reply_alloc_info(void *vair)
case ERTS_ALC_INFO_A_DISABLED_EXEC:
break;
case ERTS_ALC_INFO_A_MSEG_ALLOC:
-#if HAVE_ERTS_MSEG && defined(ERTS_SMP)
+#if HAVE_ERTS_MSEG
alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
ainfo = erts_mseg_info(sched_id, NULL, NULL,
hpp != NULL, air->only_sz, hpp, szp);
@@ -3354,11 +3233,13 @@ reply_alloc_info(void *vair)
if (air->req_sched == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&air->refc) == 0) {
+ erts_iref_storage_clean(&air->iref);
aireq_free(air);
+ }
}
int
@@ -3371,7 +3252,6 @@ erts_request_alloc_info(struct process *c_p,
ErtsAllocInfoReq *air = aireq_alloc();
Eterm req_ai[ERTS_ALC_INFO_A_END] = {0};
Eterm alist;
- Eterm *hp;
int airix = 0, ai;
air->req_sched = erts_get_scheduler_id();
@@ -3385,8 +3265,7 @@ erts_request_alloc_info(struct process *c_p,
if (is_not_internal_ref(ref))
return 0;
- hp = &air->ref_heap[0];
- air->ref = STORE_NC(&hp, NULL, ref);
+ erts_iref_storage_save(&air->iref, ref);
if (is_not_list(allocs))
return 0;
@@ -3436,28 +3315,85 @@ erts_request_alloc_info(struct process *c_p,
air->allocs[airix] = ERTS_ALC_A_INVALID;
- erts_smp_atomic32_init_nob(&air->refc,
+ erts_atomic32_init_nob(&air->refc,
(erts_aint32_t) erts_no_schedulers);
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_alloc_info,
(void *) air);
-#endif
reply_alloc_info((void *) air);
return 1;
}
+Eterm erts_alloc_set_dyn_param(Process* c_p, Eterm tuple)
+{
+ ErtsAllocatorThrSpec_t *tspec;
+ ErtsAlcType_t ai;
+ Allctr_t* allctr;
+ Eterm* tp;
+ Eterm res;
+
+ if (!is_tuple_arity(tuple, 3))
+ goto badarg;
+
+ tp = tuple_val(tuple);
+
+ /*
+ * Ex: {ets_alloc, sbct, 256000}
+ */
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_integer(tp[3]))
+ goto badarg;
+
+ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
+ if (erts_is_atom_str(erts_alc_a2ad[ai], tp[1], 0))
+ break;
+
+ if (ai > ERTS_ALC_A_MAX)
+ goto badarg;
+
+ if (!erts_allctrs_info[ai].enabled ||
+ !erts_allctrs_info[ai].alloc_util) {
+ return am_notsup;
+ }
+
+ if (tp[2] == am_sbct) {
+ Uint sbct;
+ int i, ok;
+
+ if (!term_to_Uint(tp[3], &sbct))
+ goto badarg;
+
+ tspec = &erts_allctr_thr_spec[ai];
+ if (tspec->enabled) {
+ ok = 0;
+ for (i = 0; i < tspec->size; i++) {
+ allctr = tspec->allctr[i];
+ ok |= allctr->try_set_dyn_param(allctr, am_sbct, sbct);
+ }
+ }
+ else {
+ allctr = erts_allctrs_info[ai].extra;
+ ok = allctr->try_set_dyn_param(allctr, am_sbct, sbct);
+ }
+ return ok ? am_ok : am_notsup;
+ }
+ return am_notsup;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(res, c_p, EXC_BADARG);
+ return res;
+}
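
A sketch of a call site (the tuple shape comes from the "Ex:" comment above; where the tuple originates is outside this hunk):

    /* Sketch: request sbct = 256000 bytes for ets_alloc at runtime. */
    Eterm res = erts_alloc_set_dyn_param(c_p, tuple); /* {ets_alloc,sbct,256000} */
    if (res == am_ok) {
        /* at least one (thread-specific) instance accepted the value */
    } else if (res == am_notsup) {
        /* allocator disabled, not alloc_util-based, or parameter fixed */
    } /* otherwise a badarg error has been prepared on c_p */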
+
/*
* The allocator wrapper prelocking stuff below is about the locking order.
- * It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks
- * during alloc/realloc/free.
+ * It only affects wrappers (erl_mtrace.c) that keep locks during
+ * alloc/realloc/free.
*
* Some query functions in erl_alloc_util.c lock the allocator mutex and then
* use erts_printf that in turn may call the sys allocator through the wrappers.
@@ -3502,28 +3438,6 @@ void erts_allctr_wrapper_pre_unlock(void)
}
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * Deprecated functions *
- * *
- * These functions are still defined since "non-OTP linked in drivers" may *
- * contain (illegal) calls to them. *
-\* */
-
-/* --- DO *NOT* USE THESE FUNCTIONS --- */
-
-void *sys_alloc(Uint sz)
-{ return erts_alloc_fnf(ERTS_ALC_T_UNDEF, sz); }
-void *sys_realloc(void *ptr, Uint sz)
-{ return erts_realloc_fnf(ERTS_ALC_T_UNDEF, ptr, sz); }
-void sys_free(void *ptr)
-{ erts_free(ERTS_ALC_T_UNDEF, ptr); }
-void *safe_alloc(Uint sz)
-{ return erts_alloc(ERTS_ALC_T_UNDEF, sz); }
-void *safe_realloc(void *ptr, Uint sz)
-{ return erts_realloc(ERTS_ALC_T_UNDEF, ptr, sz); }
-
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_alc_test() is only supposed to be used for testing. *
* *
@@ -3544,35 +3458,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0xf:
switch (op) {
case 0xf00:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
else
-#endif
return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
case 0xf01:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
else
-#endif
return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
case 0xf02:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
else
-#endif
erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
return 0;
case 0xf03: {
@@ -3583,11 +3491,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
init.enable = 1;
init.atype = GOODFIT;
init.init.util.name_prefix = (char *) a1;
-#ifdef ERTS_SMP
init.init.util.ts = 1;
-#else
- init.init.util.ts = a2 ? 1 : 0;
-#endif
if ((char **) a3) {
char **argv = (char **) a3;
int i = 0;
@@ -3622,7 +3526,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
&init.init.af,
&init.init.util);
break;
- case AOFIRSTFIT:
+ case FIRSTFIT:
allctr = erts_aoffalc_start((AOFFAllctr_t *)
erts_alloc(ERTS_ALC_T_UNDEF,
sizeof(AOFFAllctr_t)),
@@ -3642,7 +3546,6 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
erts_alcu_stop((Allctr_t *) a1);
erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
break;
-#ifdef USE_THREADS
case 0xf05: return (UWord) 1;
case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
@@ -3712,17 +3615,14 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_thr_exit((void *) a1);
ERTS_ALC_TEST_ABORT;
break;
-#endif /* #ifdef USE_THREADS */
-#ifdef ERTS_SMP
case 0xf13: return (UWord) 1;
-#else
- case 0xf13: return (UWord) 0;
-#endif
case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
- case 0xf16: {
+ case 0xf16: return (UWord) erts_realloc(ERTS_ALC_T_TEST, (void*)a1, (Uint)a2);
+
+ case 0xf17: {
Uint extra_hdr_sz = UNIT_CEILING((Uint)a1);
ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
Uint offset = ts->allctr[0]->mbc_header_size;
@@ -3749,7 +3649,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
*(void**)a3 = orig_destroying_mbc;
return offset;
}
- case 0xf17: {
+ case 0xf18: {
ErtsAllocatorThrSpec_t* ts = &erts_allctr_thr_spec[ERTS_ALC_A_TEST];
return ts->allctr[0]->largest_mbc_size;
}
@@ -3831,7 +3731,8 @@ hdbg_init(void)
hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
free_hdbg_mblks = &hdbg_mblks[0];
used_hdbg_mblks = NULL;
- erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
static void *check_memory_fence(void *ptr,
@@ -3920,10 +3821,8 @@ void check_allocators(void)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
Allctr_t *allctr = real_af->extra;
Carrier_t *ct;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
if (allctr->check_mbc) {
for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
@@ -3931,10 +3830,8 @@ void check_allocators(void)
allctr->check_mbc(allctr,ct);
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
}
}
@@ -3961,7 +3858,7 @@ set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
*(ui_ptr++) = sz;
*(ui_ptr++) = pattern;
- memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
+ sys_memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(UWord));
#ifdef HARD_DEBUG
*mblkpp = hdbg_alloc((void *) ui_ptr, sz, n);
@@ -4001,7 +3898,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
(UWord) ptr);
}
- memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
+ sys_memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
if (post_pattern != MK_PATTERN(n)
|| pre_pattern != post_pattern) {
@@ -4129,12 +4026,23 @@ debug_free(ErtsAlcType_t n, void *extra, void *ptr)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
void *dptr;
Uint size;
+ int free_pattern = n;
ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+ if (!ptr)
+ return;
+
dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
- sys_memset((void *) dptr, n, size + FENCE_SZ);
+#ifdef ERTS_ALC_A_EXEC
+# if defined(__i386__) || defined(__x86_64__)
+ if (ERTS_ALC_T2A(ERTS_ALC_N2T(n)) == ERTS_ALC_A_EXEC) {
+ free_pattern = 0x0f; /* Illegal instruction */
+ }
+# endif
+#endif
+ sys_memset((void *) dptr, free_pattern, size + FENCE_SZ);
(*real_af->free)(n, real_af->extra, dptr);
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 925a081a02..fcb58ff58a 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,9 +27,7 @@
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_alloc_util.h"
-#ifdef USE_THREADS
#include "erl_threads.h"
-#endif
#include "erl_mmap.h"
#ifdef DEBUG
@@ -69,11 +67,11 @@ void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old
void erts_sys_aligned_free(UWord alignment, void *ptr);
#endif
-Eterm erts_memory(int *, void *, void *, Eterm);
-Eterm erts_allocated_areas(int *, void *, void *);
+Eterm erts_memory(fmtfn_t *, void *, void *, Eterm);
+Eterm erts_allocated_areas(fmtfn_t *, void *, void *);
Eterm erts_alloc_util_allocators(void *proc);
-void erts_allocator_info(int, void *);
+void erts_allocator_info(fmtfn_t, void *);
Eterm erts_allocator_options(void *proc);
struct process;
@@ -128,8 +126,10 @@ typedef struct {
void *extra;
} ErtsAllocatorFunctions_t;
-extern ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
-extern ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
+extern ErtsAllocatorFunctions_t
+ ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
+extern ErtsAllocatorInfo_t
+ ERTS_WRITE_UNLIKELY(erts_allctrs_info[ERTS_ALC_A_MAX+1]);
typedef struct {
int enabled;
@@ -146,7 +146,7 @@ typedef struct ErtsAllocatorWrapper_t_ {
void (*unlock)(void);
struct ErtsAllocatorWrapper_t_* next;
}ErtsAllocatorWrapper_t;
-ErtsAllocatorWrapper_t *erts_allctr_wrappers;
+extern ErtsAllocatorWrapper_t *erts_allctr_wrappers;
extern int erts_allctr_wrapper_prelocked;
extern erts_tsd_key_t erts_allctr_prelock_tsd_key;
void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper);
@@ -154,12 +154,10 @@ void erts_allctr_wrapper_pre_lock(void);
void erts_allctr_wrapper_pre_unlock(void);
void erts_alloc_register_scheduler(void *vesdp);
-#ifdef ERTS_SMP
void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *more_work);
-#endif
erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs);
__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
@@ -173,12 +171,7 @@ __decl_noreturn void erts_realloc_n_enomem(ErtsAlcType_t,void*,Uint)
__decl_noreturn void erts_alc_fatal_error(int,int,ErtsAlcType_t,...)
__noreturn;
-/* --- DO *NOT* USE THESE DEPRECATED FUNCTIONS --- Instead use: */
-void *safe_alloc(Uint) __deprecated; /* erts_alloc() */
-void *safe_realloc(void *, Uint) __deprecated; /* erts_realloc() */
-void sys_free(void *) __deprecated; /* erts_free() */
-void *sys_alloc(Uint ) __deprecated; /* erts_alloc_fnf() */
-void *sys_realloc(void *, Uint) __deprecated; /* erts_realloc_fnf() */
+Eterm erts_alloc_set_dyn_param(struct process*, Eterm);
#undef ERTS_HAVE_IS_IN_LITERAL_RANGE
#if defined(ARCH_32) || defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
@@ -345,54 +338,23 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
(((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)
#define ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- (void) 0, (void) 0, (void) 0)
-
-#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-static erts_smp_spinlock_t NAME##_lck; \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
- erts_smp_spin_lock(&NAME##_lck), \
- erts_smp_spin_unlock(&NAME##_lck))
-
-#ifdef ERTS_SMP
+ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, (void) 0, (void) 0, (void) 0)
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
-
-#else /* !ERTS_SMP */
-
-#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-static erts_mtx_t NAME##_lck; \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock"), \
- erts_mtx_lock(&NAME##_lck), \
- erts_mtx_unlock(&NAME##_lck))
-
-
-#endif
-
-#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
-ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
+ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
static erts_spinlock_t NAME##_lck; \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
- erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
-#ifdef ERTS_SMP
-#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
+#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ)
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PALLOC_IMPL(NAME, TYPE, PASZ)
-
-#endif
#define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \
ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \
@@ -416,21 +378,11 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
-ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
#define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
@@ -454,9 +406,35 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
+#define ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
+
+#define ERTS_THR_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ init_##NAME##_pre_alloc(nthreads); \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res = NAME##_pre_alloc(); \
+ if (!res) \
+ res = erts_alloc(ALCT, sizeof(TYPE)); \
+ return res; \
+} \
+static ERTS_INLINE void \
+NAME##_free(TYPE *p) \
+{ \
+ if (!NAME##_pre_free(p)) \
+ erts_free(ALCT, (void *) p); \
+}
+
+
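/* A minimal usage sketch for the macro above (hypothetical names: my_item_t
 * and ERTS_ALC_T_MY_ITEM are placeholders, not real ERTS types):
 *
 *   typedef struct { UWord payload; } my_item_t;
 *   ERTS_THR_PREF_QUICK_ALLOC_IMPL(my_item, my_item_t, 512, ERTS_ALC_T_MY_ITEM)
 *
 * This expands to init_my_item_alloc(nthreads), my_item_alloc() and
 * my_item_free(). Each thread calls erts_my_item_pre_alloc_init_thread()
 * once before its first allocation; threads that never register get NULL
 * from the pre-allocator and fall back to plain erts_alloc()/erts_free().
 */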
#ifdef DEBUG
-#define ERTS_PRE_ALLOC_SIZE(SZ) 2
-#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
+#define ERTS_PRE_ALLOC_CLOBBER(P, T) sys_memset((void *) (P), 0xfd, sizeof(T))
#else
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
#define ERTS_PRE_ALLOC_CLOBBER(P, T)
@@ -534,7 +512,8 @@ init_##NAME##_alloc(void) \
{ \
sspa_data_##NAME##__ = \
erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
- ERTS_PRE_ALLOC_SIZE((PASZ))); \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ 0, NULL); \
} \
\
static TYPE * \
@@ -556,6 +535,57 @@ NAME##_free(TYPE *p) \
(char *) p); \
}
+
+#define ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
+union erts_sspa_##NAME##__ { \
+ erts_sspa_blk_t next; \
+ TYPE type; \
+}; \
+ \
+static erts_sspa_data_t *sspa_data_##NAME##__; \
+ \
+static void \
+init_##NAME##_alloc(int nthreads) \
+{ \
+ sspa_data_##NAME##__ = \
+ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
+ ERTS_PRE_ALLOC_SIZE((PASZ)), \
+ nthreads, \
+ #NAME); \
+} \
+ \
+void \
+erts_##NAME##_alloc_init_thread(void) \
+{ \
+ int id = erts_atomic_inc_read_nob(&sspa_data_##NAME##__->id_generator);\
+ if (id > sspa_data_##NAME##__->nthreads) { \
+ erts_exit(ERTS_ABORT_EXIT, \
+ "%s:%d:%s(): Too many threads for '" #NAME "'\n", \
+ __FILE__, __LINE__, __func__); \
+ } \
+ erts_tsd_set(sspa_data_##NAME##__->tsd_key, (void*)(SWord)id); \
+} \
+ \
+static TYPE * \
+NAME##_alloc(void) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ if (id == 0) \
+ return NULL; \
+ return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
+ id-1); \
+} \
+ \
+static int \
+NAME##_free(TYPE *p) \
+{ \
+ int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \
+ return erts_sspa_free(sspa_data_##NAME##__, \
+ id - 1, \
+ (char *) p); \
+}
+
+
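/* How the thread registration above hands out slots, roughly: a shared
 * atomic counter (id_generator) assigns ids 1..nthreads, the id is cached
 * in thread-specific data, and id 0 (never registered) makes NAME##_alloc()
 * return NULL so callers can fall back to a thread-safe allocator.
 * Continuing the hypothetical my_item example:
 *
 *   erts_my_item_pre_alloc_init_thread();   // claim an id, store it in TSD
 *   my_item_t *p = my_item_pre_alloc();     // NULL if this thread's id is 0
 */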
#ifdef DEBUG
#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((UWord *) (PTR)) - 2))
#endif /* #ifdef DEBUG */
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 6e8710eb8a..4f03a34390 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2016. All Rights Reserved.
+# Copyright Ericsson AB 2003-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -53,19 +53,10 @@
#
# IMPORTANT! Only use 7-bit ascii text in this file!
-+if smp
-+disable threads_no_smp
-+else
-+if threads
-+enable threads_no_smp
-+else
-+disable threads_no_smp
-+endif
-+endif
# --- Allocator declarations -------------------------------------------------
#
-# If, and only if, the same thread performes *all* allocations,
+# If, and only if, the same thread performs *all* allocations,
# reallocations and deallocations of all memory types that are handled
# by a specific allocator (<ALLOCATOR> in type declaration), set
# <MULTI_THREAD> for this specific allocator to false; otherwise, set
@@ -77,8 +68,6 @@
allocator SYSTEM true sys_alloc
-+if smp
-
allocator TEMPORARY true temp_alloc
allocator SHORT_LIVED true sl_alloc
allocator STANDARD true std_alloc
@@ -91,22 +80,6 @@ allocator LITERAL true literal_alloc
allocator EXEC true exec_alloc
+endif
-+else # Non smp build
-
-allocator TEMPORARY false temp_alloc
-allocator SHORT_LIVED false sl_alloc
-allocator STANDARD false std_alloc
-allocator LONG_LIVED false ll_alloc
-allocator EHEAP false eheap_alloc
-allocator ETS false ets_alloc
-allocator FIXED_SIZE false fix_alloc
-allocator LITERAL false literal_alloc
-+if exec_alloc
-allocator EXEC false exec_alloc
-+endif
-
-+endif
-
allocator BINARY true binary_alloc
allocator DRIVER true driver_alloc
@@ -145,9 +118,6 @@ type PORT DRIVER SYSTEM port
type ATOM LONG_LIVED ATOM atom_entry
type MODULE LONG_LIVED CODE module_entry
type REG_PROC STANDARD PROCESSES reg_proc
-type LINK_LH STANDARD PROCESSES link_lh
-type SUSPEND_MON STANDARD PROCESSES suspend_monitor
-type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
type SAVED_ESTACK SHORT_LIVED PROCESSES saved_estack
type FUN_ENTRY LONG_LIVED CODE fun_entry
@@ -160,7 +130,6 @@ type TMP_HEAP TEMPORARY PROCESSES tmp_heap
type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG EHEAP PROCESSES message
type MSGQ_CHNG SHORT_LIVED PROCESSES messages_queue_change
-type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
type PREPARED_CODE SHORT_LIVED CODE prepared_code
@@ -168,7 +137,6 @@ type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
-# type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
@@ -206,7 +174,6 @@ type BPD STANDARD SYSTEM bpd
type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
-type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
type DB_TABLE ETS ETS db_tab
@@ -219,7 +186,6 @@ type DB_MC_STK TEMPORARY ETS db_mc_stack
type DB_MS_RUN_HEAP SHORT_LIVED ETS db_match_spec_run_heap
type DB_MS_CMPL_HEAP TEMPORARY ETS db_match_spec_cmpl_heap
type DB_SEG ETS ETS db_segment
-type DB_SEG_TAB ETS ETS db_segment_tab
type DB_STK ETS ETS db_stack
type DB_TRANS_TAB ETS ETS db_trans_tab
type DB_SEL_LIST ETS ETS db_select_list
@@ -227,7 +193,7 @@ type DB_DMC_ERROR ETS ETS db_dmc_error
type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
type DB_TERM ETS ETS db_term
type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
-type INSTR_INFO LONG_LIVED SYSTEM instr_info
+type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
@@ -242,7 +208,6 @@ type PT_HNDL_LIST SHORT_LIVED SYSTEM port_task_handle_list
type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
type PORT_NAMES SHORT_LIVED SYSTEM port_names
type PORT_DATA_LOCK DRIVER SYSTEM port_data_lock
-type NODES_MON STANDARD PROCESSES nodes_monitor
type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el
type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info
type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids
@@ -265,10 +230,8 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
-type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
-type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
@@ -277,76 +240,65 @@ type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
-type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
-type DIRTY_START STANDARD PROCESSES dirty_start
type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived
+type MREF_NSCHED_ENT FIXED_SIZE SYSTEM nsched_magic_ref_entry
+type MREF_ENT STANDARD SYSTEM magic_ref_entry
+type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets
+type MREF_TAB LONG_LIVED SYSTEM magic_ref_table
+type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection
+type BINARY_FIND SHORT_LIVED PROCESSES binary_find
+type CRASH_DUMP STANDARD SYSTEM crash_dump
+type DIST_TRANSCODE SHORT_LIVED SYSTEM dist_transcode_context
-+if threads_no_smp
-# Need thread safe allocs, but std_alloc and fix_alloc are not;
-# use driver_alloc which is...
-type THR_Q_EL DRIVER SYSTEM thr_q_element
-type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element
-type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work
-+else
type THR_Q_EL STANDARD SYSTEM thr_q_element
type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element
type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
-+endif
type THR_Q STANDARD SYSTEM thr_queue
type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue
type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue
-+if smp
type ASYNC SHORT_LIVED SYSTEM async
type ZLIB STANDARD SYSTEM zlib
-+else
-# sl/std_alloc is not thread safe in non smp build; therefore, we use driver_alloc
-type ZLIB DRIVER SYSTEM zlib
-type ASYNC DRIVER SYSTEM async
-+endif
-+if smp
-type PORT_LOCK STANDARD SYSTEM port_lock
type DRIVER_LOCK STANDARD SYSTEM driver_lock
type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
-type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data
type RELEASE_LAREA SHORT_LIVED SYSTEM release_literal_area
-+endif
+type SIG_DATA SHORT_LIVED PROCESSES signal_data
+type DIST_DEMONITOR SHORT_LIVED PROCESSES dist_demonitor
+type CML_CLEANUP SHORT_LIVED SYSTEM connection_ml_cleanup
+type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
+type ML_DIST STANDARD SYSTEM monitor_link_dist
+type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
+type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
+type LIST_TRAP SHORT_LIVED PROCESSES list_bif_trap_state
+
+type ENVIRONMENT SYSTEM SYSTEM environment
+
+type PERSISTENT_TERM LONG_LIVED CODE persistent_term
+type PERSISTENT_LOCK_Q SHORT_LIVED SYSTEM persistent_lock_q
#
# Types used for special emulators
#
-+if threads
-
type ETHR_STD STANDARD SYSTEM ethread_standard
type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
-+endif
-
-+if shared_heap
-
-type STACK STANDARD PROCESSES stack
-type ACTIVE_PROCS STANDARD PROCESSES active_procs
-
-+endif
-
-+if smp
type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue
type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception
type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths
type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths
-+endif
+if hipe
-# Currently most hipe code use this type.
-type HIPE SYSTEM SYSTEM hipe_data
+type HIPE_LL LONG_LIVED SYSTEM hipe_long_lived
+type HIPE_SL SHORT_LIVED SYSTEM hipe_short_lived
+type HIPE_STK STANDARD SYSTEM hipe_nstack
+if exec_alloc
type HIPE_EXEC EXEC CODE hipe_code
@@ -354,19 +306,18 @@ type HIPE_EXEC EXEC CODE hipe_code
+endif
++if lcnt
-
-+if heap_frag_elim_test
-
-type SSB SHORT_LIVED PROCESSES ssb
+type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
+type LCNT_VECTOR SHORT_LIVED SYSTEM lcnt_sample_vector
+endif
type DEBUG SHORT_LIVED SYSTEM debugging
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
-type MONITOR_LH STANDARD PROCESSES monitor_lh
-type NLINK_LH STANDARD PROCESSES nlink_lh
+type MONITOR_EXT STANDARD PROCESSES monitor_extended
+type LINK_EXT STANDARD PROCESSES link_extended
type CODE LONG_LIVED CODE code
type LITERAL LITERAL CODE literal
type LITERAL_REF SHORT_LIVED CODE literal_area_ref
@@ -374,18 +325,21 @@ type PURGE_DATA SHORT_LIVED CODE purge_data
type DB_HEIR_DATA STANDARD ETS db_heir_data
type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
-type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
-type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
+type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
+type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
-type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
-type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
+type MONITOR FIXED_SIZE PROCESSES monitor
+type MONITOR_SUSPEND STANDARD PROCESSES monitor_suspend
+type LINK FIXED_SIZE PROCESSES link
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
type MSACC DRIVER SYSTEM microstate_accounting
type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
+type ATOMICS STANDARD SYSTEM erl_bif_atomics
+type COUNTERS STANDARD SYSTEM erl_bif_counters
#
# Types used by system specific code
@@ -394,33 +348,22 @@ type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request
type TEMP_TERM TEMPORARY SYSTEM temp_term
type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
-type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
-type FD_LIST SHORT_LIVED SYSTEM fd_list
-type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array
+type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
-type POLL_RES_EVS LONG_LIVED SYSTEM poll_result_events
type FD_STATUS LONG_LIVED SYSTEM fd_status
type SELECT_FDS LONG_LIVED SYSTEM select_fds
+if unix
type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
-type FD_TAB LONG_LIVED SYSTEM fd_tab
type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
-type ENVIRONMENT TEMPORARY SYSTEM environment
-type PUTENV_STR SYSTEM SYSTEM putenv_string
-type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
type SYS_BLOCKING STANDARD SYSTEM sys_blocking
-+if smp
type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf
-+else
-type SYS_WRITE_BUF BINARY SYSTEM sys_write_buf
-+endif
+endif
@@ -428,10 +371,7 @@ type SYS_WRITE_BUF BINARY SYSTEM sys_write_buf
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
type PRELOADED LONG_LIVED SYSTEM preloaded
-type PUTENV_STR SYSTEM SYSTEM putenv_string
type WAITER_OBJ LONG_LIVED SYSTEM waiter_object
-type ENVIRONMENT SYSTEM SYSTEM environment
-type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+endif
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 2995f2f822..0be4562785 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,6 +48,8 @@
#include "erl_mseg.h"
#include "erl_threads.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
+#include "erl_nif.h"
#ifdef ERTS_ENABLE_LOCK_COUNT
#include "erl_lock_count.h"
@@ -139,6 +141,33 @@ MBC after deallocating first block:
[Carrier_t|pad|Block_t 111| udata... ]
*/
+/* Allocation tags ...
+ *
+ * These are added to the footer of every block when enabled. Currently they
+ * consist of the allocation type and an atom identifying the allocating
+ * driver/nif (or 'system' if that can't be determined), but the format is not
+ * supposed to be set in stone.
+ *
+ * The packing scheme requires that the atom values are small enough to fit
+ * into a word with ERTS_ALC_N_BITS to spare. Users must check for overflow
+ * before MAKE_ATAG(). */
+
+typedef UWord alcu_atag_t;
+
+#define MAKE_ATAG(IdAtom, Type) \
+ (ASSERT((Type) >= ERTS_ALC_N_MIN && (Type) <= ERTS_ALC_N_MAX), \
+ ASSERT(atom_val(IdAtom) <= MAX_ATAG_ATOM_ID), \
+ (atom_val(IdAtom) << ERTS_ALC_N_BITS) | (Type))
+
+#define ATAG_ID(AT) (make_atom((AT) >> ERTS_ALC_N_BITS))
+#define ATAG_TYPE(AT) ((AT) & ERTS_ALC_N_MASK)
+
+#define MAX_ATAG_ATOM_ID (ERTS_UWORD_MAX >> ERTS_ALC_N_BITS)
+
+#define DBG_IS_VALID_ATAG(Allocator, AT) \
+ (ATAG_TYPE(AT) >= ERTS_ALC_N_MIN && \
+ ATAG_TYPE(AT) <= ERTS_ALC_N_MAX && \
+ (Allocator)->alloc_no == ERTS_ALC_T2A(ERTS_ALC_N2T(ATAG_TYPE(AT))))
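/* A round-trip of the packing scheme, assuming atom_val(am_system) fits in
 * a word with ERTS_ALC_N_BITS to spare (checked by a CT_ASSERT further down):
 *
 *   alcu_atag_t tag = MAKE_ATAG(am_system, ERTS_ALC_N_MIN);
 *   ASSERT(ATAG_ID(tag) == am_system);         // atom from the high bits
 *   ASSERT(ATAG_TYPE(tag) == ERTS_ALC_N_MIN);  // type from the low bits
 */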
/* Blocks ... */
@@ -153,10 +182,17 @@ MBC after deallocating first block:
#endif
#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
+#define GET_BLK_ATAG(B) \
+ (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1])
+#define SET_BLK_ATAG(B, T) \
+ (((alcu_atag_t *) (((char *) (B)) + (BLK_SZ(B))))[-1] = (T))
+
+#define BLK_ATAG_SZ(AP) ((AP)->atags ? sizeof(alcu_atag_t) : 0)
+
#define UMEMSZ2BLKSZ(AP, SZ) \
- (ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
+ (ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ) <= (AP)->min_block_size \
? (AP)->min_block_size \
- : UNIT_CEILING(ABLK_HDR_SZ + (SZ)))
+ : UNIT_CEILING(ABLK_HDR_SZ + BLK_ATAG_SZ(AP) + (SZ)))
#define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
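/* With atags enabled the tag is stored in the last word of the block, which
 * UMEMSZ2BLKSZ() reserves via BLK_ATAG_SZ(AP):
 *
 * [Block_t hdr | user data (SZ) ... pad | alcu_atag_t]
 *  <-ABLK_HDR_SZ->                       <-one UWord->
 */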
@@ -305,9 +341,6 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_DEBUG
#endif
-#ifndef ERTS_SMP
-# undef ERTS_ALC_CPOOL_DEBUG
-#endif
#ifdef ERTS_ALC_CPOOL_DEBUG
# define ERTS_ALC_CPOOL_ASSERT(A) \
@@ -322,13 +355,8 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit)
-#else
-#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000
#define ERTS_ALC_CPOOL_ALLOC_OP_INC 8
@@ -367,28 +395,18 @@ do { \
} \
} while (0)
-#else
-#define ERTS_ALC_CPOOL_ALLOC_OP(A)
-#define ERTS_ALC_CPOOL_REALLOC_OP(A)
-#define ERTS_ALC_CPOOL_FREE_OP(A)
-#endif
#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
+#define ERTS_CRR_ALCTR_FLG_HOMECOMING (((erts_aint_t) 1) << 2)
#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
- ERTS_CRR_ALCTR_FLG_BUSY)
+ ERTS_CRR_ALCTR_FLG_BUSY | \
+ ERTS_CRR_ALCTR_FLG_HOMECOMING)
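/* The flag bits live in the low bits of crr->allctr, alongside an aligned
 * Allctr_t pointer; a hedged sketch of how readers decode the word:
 *
 *   erts_aint_t w   = erts_atomic_read_nob(&crr->allctr);
 *   Allctr_t *owner = (Allctr_t *) (w & ~ERTS_CRR_ALCTR_FLG_MASK);
 *   int in_pool     = !!(w & ERTS_CRR_ALCTR_FLG_IN_POOL);
 *   int busy        = !!(w & ERTS_CRR_ALCTR_FLG_BUSY);
 *   int homecoming  = !!(w & ERTS_CRR_ALCTR_FLG_HOMECOMING);
 */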
-#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
(UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ ABLK_HDR_SZ) \
- ABLK_HDR_SZ)
-#else
-#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- + ABLK_HDR_SZ) \
- - ABLK_HDR_SZ)
-#endif
#define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
@@ -402,7 +420,7 @@ do { \
#define SET_CARRIER_HDR(C, Sz, F, AP) \
(ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \
- erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
+ erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP)))
#define BLK_TO_SBC(B) \
((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
@@ -583,7 +601,7 @@ do { \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_MBC_CPOOL_INSERT(AP, CRR) \
+#define STAT_MBC_ABANDON(AP, CRR) \
do { \
UWord csz__ = CARRIER_SZ((CRR)); \
if (IS_MSEG_CARRIER((CRR))) \
@@ -598,15 +616,11 @@ do { \
(AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \
} while (0)
-#ifdef ERTS_SMP
#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \
do { \
(CRR)->cpool.blocks++; \
(CRR)->cpool.blocks_size += (BSZ); \
} while (0)
-#else
-#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */
-#endif
#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
do { \
@@ -626,7 +640,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
Carrier_t **busy_pcrr_pp,
UWord blksz)
{
-#ifdef ERTS_SMP
ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0);
crr->cpool.blocks--;
@@ -651,9 +664,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
#endif
return 1;
-#else
- return 0;
-#endif
}
#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \
@@ -689,12 +699,7 @@ do { \
#endif
#ifdef DEBUG
-#ifdef USE_THREADS
-# ifdef ERTS_SMP
# define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
-# else
-# define IS_ACTUALLY_BLOCKING 0
-# endif
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
@@ -703,7 +708,7 @@ do { \
(A)->debug.saved_tid = 1; \
} \
else { \
- ERTS_SMP_LC_ASSERT( \
+ ERTS_LC_ASSERT( \
ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
} \
} \
@@ -711,9 +716,6 @@ do { \
#else
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
-#else
-#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
-#endif
static void make_name_atoms(Allctr_t *allctr);
@@ -722,6 +724,62 @@ static void destroy_carrier(Allctr_t *, Block_t *, Carrier_t **);
static void mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp);
static void dealloc_block(Allctr_t *, void *, ErtsAlcFixList_t *, int);
+static alcu_atag_t determine_alloc_tag(Allctr_t *allocator, ErtsAlcType_t type)
+{
+ ErtsSchedulerData *esdp;
+ Eterm id;
+
+ ERTS_CT_ASSERT(_unchecked_atom_val(am_system) <= MAX_ATAG_ATOM_ID);
+ ASSERT(allocator->atags);
+
+ esdp = erts_get_scheduler_data();
+ id = am_system;
+
+ if (esdp) {
+ if (esdp->current_nif) {
+ Module *mod = erts_nif_get_module((esdp->current_nif)->mod_nif);
+
+ /* Mod can be NULL if a resource destructor allocates memory after
+ * the module has been unloaded. */
+ if (mod) {
+ id = make_atom(mod->module);
+ }
+ } else if (esdp->current_port) {
+ Port *p = esdp->current_port;
+ id = (p->drv_ptr)->name_atom;
+ }
+
+ /* We fall back to 'system' if we can't pack the driver/NIF name into
+ * the tag. This may be a bit misleading but we've made no promises
+ * that the information is complete.
+ *
+ * This can only happen on 32-bit emulators when a new driver/NIF has
+ * been loaded *after* 16 million atoms have been used, and supporting
+ * that fringe case is not worth an extra word. 64-bit emulators are
+ * unaffected since the atom cache limits atom indexes to 32 bits. */
+        if (MAX_ATOM_TABLE_SIZE > MAX_ATAG_ATOM_ID) {
+ if (atom_val(id) > MAX_ATAG_ATOM_ID) {
+ id = am_system;
+ }
+ }
+ }
+
+ return MAKE_ATAG(id, type);
+}
+
+static void set_alloc_tag(Allctr_t *allocator, void *p, alcu_atag_t tag)
+{
+ Block_t *block;
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+ ASSERT(allocator->atags && p);
+ (void)allocator;
+
+ block = UMEM2BLK(p);
+
+ SET_BLK_ATAG(block, tag);
+}
+
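/* A simplified sketch (not verbatim from the allocation path) of how the two
 * helpers above are meant to cooperate when allctr->atags is set:
 *
 *   alcu_atag_t tag = determine_alloc_tag(allctr, type);
 *   void *p = mbc_alloc(allctr, size);
 *   if (p)
 *       set_alloc_tag(allctr, p, tag);
 *
 * determine_alloc_tag() must run before the block exists; the tag is only
 * written into the footer once the allocation has succeeded.
 */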
/* internal data... */
#if 0
@@ -776,6 +834,8 @@ static ERTS_INLINE void clr_bit(UWord* map, Uint ix)
&= ~((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#ifdef DEBUG
+
static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
{
ASSERT(ix / ERTS_VSPACE_WORD_BITS < VSPACE_MAP_SZ);
@@ -783,6 +843,8 @@ static ERTS_INLINE int is_bit_set(UWord* map, Uint ix)
& ((UWord)1 << (ix % ERTS_VSPACE_WORD_BITS));
}
+#endif
+
UWord erts_literal_vspace_map[VSPACE_MAP_SZ];
static void set_literal_range(void* start, Uint size)
@@ -821,7 +883,7 @@ static void clear_literal_range(void* start, Uint size)
#if HAVE_ERTS_MSEG
-void*
+static void*
erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
@@ -832,7 +894,7 @@ erts_alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
return res;
}
-void*
+static void*
erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
Uint old_size, Uint *new_size_p)
{
@@ -845,7 +907,7 @@ erts_alcu_mseg_realloc(Allctr_t *allctr, void *seg,
return res;
}
-void
+static void
erts_alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
{
erts_mseg_dealloc_opt(allctr->alloc_no, seg, (UWord) size, flags, &allctr->mseg_opt);
@@ -862,7 +924,7 @@ erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p);
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
res = erts_alcu_mseg_alloc(allctr, &sz, flags);
if (res) {
@@ -880,7 +942,7 @@ erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg,
Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p);
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
if (seg && old_size)
clear_literal_range(seg, old_size);
@@ -898,7 +960,7 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
{
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
erts_alcu_mseg_dealloc(allctr, seg, size, flags);
@@ -907,7 +969,9 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
#elif defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
-/* Used by literal allocator that has its own mmapper (super carrier) */
+/* For allocators that have their own mmapper (super carrier),
+ * like literal_alloc.
+ */
void*
erts_alcu_mmapper_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
@@ -948,9 +1012,53 @@ erts_alcu_mmapper_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size,
}
#endif /* ARCH_64 && ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */
-#endif /* HAVE_ERTS_MSEG */
+#if defined(ERTS_ALC_A_EXEC)
+
+/*
+ * For exec_alloc, which needs memory with PROT_EXEC
+ */
+void*
+erts_alcu_exec_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
+{
+ void* res = erts_alcu_mseg_alloc(allctr, size_p, flags);
+
+ if (res) {
+ int r = mprotect(res, *size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
void*
+erts_alcu_exec_mseg_realloc(Allctr_t *allctr, void *seg,
+ Uint old_size, Uint *new_size_p)
+{
+ void *res;
+
+ if (seg && old_size) {
+ int r = mprotect(seg, old_size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ res = erts_alcu_mseg_realloc(allctr, seg, old_size, new_size_p);
+ if (res) {
+ int r = mprotect(res, *new_size_p, PROT_EXEC | PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ }
+ return res;
+}
+
+void
+erts_alcu_exec_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, Uint flags)
+{
+ int r = mprotect(seg, size, PROT_READ | PROT_WRITE);
+ ASSERT(r == 0); (void)r;
+ erts_alcu_mseg_dealloc(allctr, seg, size, flags);
+}
+#endif /* ERTS_ALC_A_EXEC */
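/* Design note: the segments come back from erts_alcu_mseg_alloc() without
 * PROT_EXEC, so the wrappers above toggle the whole range RW <-> RWX around
 * each operation; realloc presumably drops PROT_EXEC first because the
 * segment may be copied or shrunk as ordinary data before being remapped. */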
+
+#endif /* HAVE_ERTS_MSEG */
+
+static void*
erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
{
void *res;
@@ -967,7 +1075,7 @@ erts_alcu_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
return res;
}
-void*
+static void*
erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign)
{
void *res;
@@ -989,7 +1097,7 @@ erts_alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size,
return res;
}
-void
+static void
erts_alcu_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int superalign)
{
#if ERTS_SA_MB_CARRIERS && ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
@@ -1012,7 +1120,7 @@ erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign)
Uint size = ERTS_SUPERALIGNED_CEILING(*size_p);
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
res = erts_alcu_sys_alloc(allctr, &size, 1);
if (res) {
@@ -1030,7 +1138,7 @@ erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
if (ptr && old_size)
clear_literal_range(ptr, old_size);
@@ -1047,7 +1155,7 @@ erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int sup
{
ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL &&
allctr->t == 0);
- ERTS_SMP_LC_ASSERT(allctr->thread_safe);
+ ERTS_LC_ASSERT(allctr->thread_safe);
erts_alcu_sys_dealloc(allctr, ptr, size, 1);
@@ -1143,90 +1251,23 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
ASSERT(crr->next);
crr->next->prev = crr->prev;
}
-}
-
-#ifdef ERTS_SMP
-
#ifdef DEBUG
-static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* p;
-
- ASSERT(node != sentinel);
- for (p = sentinel->next; p != sentinel; p = p->next) {
- if (p == node)
- return 1;
- }
- return 0;
-}
-#endif /* DEBUG */
-
-static ERTS_INLINE void
-link_edl_after(ErtsDoubleLink_t* after_me, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* before_me = after_me->next;
- ASSERT(node != after_me && node != before_me);
- node->next = before_me;
- node->prev = after_me;
- before_me->prev = node;
- after_me->next = node;
-}
-
-static ERTS_INLINE void
-link_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
-{
- ErtsDoubleLink_t* after_me = before_me->prev;
- ASSERT(node != before_me && node != after_me);
- node->next = before_me;
- node->prev = after_me;
- before_me->prev = node;
- after_me->next = node;
-}
-
-static ERTS_INLINE void
-unlink_edl(ErtsDoubleLink_t* node)
-{
- node->next->prev = node->prev;
- node->prev->next = node->next;
-}
-
-static ERTS_INLINE void
-relink_edl_before(ErtsDoubleLink_t* before_me, ErtsDoubleLink_t* node)
-{
- if (node != before_me && node != before_me->prev) {
- unlink_edl(node);
- link_edl_before(before_me, node);
- }
+ crr->next = crr;
+ crr->prev = crr;
+#endif
}
static ERTS_INLINE int is_abandoned(Carrier_t *crr)
{
- return crr->cpool.abandoned.next != NULL;
-}
-
-static ERTS_INLINE void
-link_abandoned_carrier(ErtsDoubleLink_t* list, Carrier_t *crr)
-{
- ASSERT(!is_abandoned(crr));
-
- link_edl_after(list, &crr->cpool.abandoned);
-
- ASSERT(crr->cpool.abandoned.next != &crr->cpool.abandoned);
- ASSERT(crr->cpool.abandoned.prev != &crr->cpool.abandoned);
+ return crr->cpool.state != ERTS_MBC_IS_HOME;
}
static ERTS_INLINE void
unlink_abandoned_carrier(Carrier_t *crr)
{
- ASSERT(is_in_list(&crr->cpool.orig_allctr->cpool.pooled_list,
- &crr->cpool.abandoned) ||
- is_in_list(&crr->cpool.orig_allctr->cpool.traitor_list,
- &crr->cpool.abandoned));
-
- unlink_edl(&crr->cpool.abandoned);
-
- crr->cpool.abandoned.next = NULL;
- crr->cpool.abandoned.prev = NULL;
+ if (crr->cpool.state == ERTS_MBC_WAS_POOLED) {
+ aoff_remove_pooled_mbc(crr->cpool.orig_allctr, crr);
+ }
}
static ERTS_INLINE void
@@ -1234,28 +1275,22 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
{
if (crr) {
erts_aint_t max_size;
- erts_aint_t new_val;
+ erts_aint_t iallctr;
max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
erts_atomic_set_nob(&crr->cpool.max_size, max_size);
- new_val = (((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
-
-#ifdef ERTS_ALC_CPOOL_DEBUG
- {
- erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY;
+ iallctr = erts_atomic_read_nob(&crr->allctr);
+ ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == ((erts_aint_t)allctr |
+ ERTS_CRR_ALCTR_FLG_IN_POOL |
+ ERTS_CRR_ALCTR_FLG_BUSY));
- ERTS_ALC_CPOOL_ASSERT(old_val
- == erts_smp_atomic_xchg_relb(&crr->allctr,
- new_val));
- }
-#else
- erts_smp_atomic_set_relb(&crr->allctr, new_val);
-#endif
+ iallctr &= ~ERTS_CRR_ALCTR_FLG_BUSY;
+ erts_atomic_set_relb(&crr->allctr, iallctr);
}
}
-#endif /* ERTS_SMP */
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -1279,12 +1314,10 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
static void *mbc_alloc(Allctr_t *allctr, Uint size);
-#ifdef ERTS_SMP
typedef struct {
ErtsAllctrDDBlock_t ddblock__; /* must be first */
ErtsAlcType_t fix_type;
} ErtsAllctrFixDDBlock_t;
-#endif
#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS)
@@ -1295,11 +1328,9 @@ dealloc_fix_block(Allctr_t *allctr,
ErtsAlcFixList_t *fix,
int dec_cc_on_redirect)
{
-#ifdef ERTS_SMP
/* May be redirected... */
ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0);
((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE;
-#endif
dealloc_block(allctr, ptr, fix, dec_cc_on_redirect);
}
@@ -1333,12 +1364,10 @@ fix_cpool_check_shrink(Allctr_t *allctr,
fix->u.cpool.shrink_list = 0;
else {
void *p;
-#ifdef ERTS_SMP
if (busy_pcrr_pp) {
clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
*busy_pcrr_pp = NULL;
}
-#endif
fix->u.cpool.shrink_list--;
p = fix->list;
fix->list = *((void **) p);
@@ -1361,6 +1390,7 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
res = fix->list;
if (res) {
@@ -1372,8 +1402,6 @@ fix_cpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
fix_cpool_check_shrink(allctr, type, fix, NULL);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (size >= allctr->sbc_threshold) {
Block_t *blk;
blk = create_carrier(allctr, size, CFLG_SBC);
@@ -1432,10 +1460,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1475,10 +1501,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1493,6 +1517,7 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
&& type <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
fix = &allctr->fix[type - ERTS_ALC_N_MIN_A_FIXED_SIZE];
+ ASSERT(size == fix->type_size);
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
fix->u.nocpool.used++;
@@ -1515,8 +1540,6 @@ fix_nocpool_alloc(Allctr_t *allctr, ErtsAlcType_t type, Uint size)
ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
return res;
}
- if (size < 2*sizeof(UWord))
- size += sizeof(UWord);
if (fix->u.nocpool.limit < fix->u.nocpool.used)
fix->u.nocpool.limit = fix->u.nocpool.used;
if (fix->u.nocpool.max_used < fix->u.nocpool.used)
@@ -1591,10 +1614,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1636,10 +1657,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1665,7 +1684,11 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
dealloc_carrier(allctr, crr, 1);
}
-#ifdef ERTS_SMP
+
+static void set_new_allctr_abandon_limit(Allctr_t*);
+static void abandon_carrier(Allctr_t*, Carrier_t*);
+static void poolify_my_carrier(Allctr_t*, Carrier_t*);
+static void enqueue_homecoming(Allctr_t*, Carrier_t*);
static ERTS_INLINE Allctr_t*
get_pref_allctr(void *extra)
@@ -1706,7 +1729,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
crr = BLK_TO_SBC(blk);
if (sizep)
*sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
- iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
+ iallctr = erts_atomic_read_dirty(&crr->allctr);
}
else {
crr = ABLK_TO_MBC(blk);
@@ -1714,10 +1737,10 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
if (sizep)
*sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr))
- iallctr = erts_smp_atomic_read_dirty(&crr->allctr);
+ iallctr = erts_atomic_read_dirty(&crr->allctr);
else {
int locked_pref_allctr = 0;
- iallctr = erts_smp_atomic_read_ddrb(&crr->allctr);
+ iallctr = erts_atomic_read_ddrb(&crr->allctr);
if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock
&& pref_allctr->thread_safe) {
@@ -1733,9 +1756,23 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
erts_aint_t act;
ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
- act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr,
- iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
- iallctr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING) {
+ /*
+ * This carrier has just been given back to us by writing
+ * to crr->allctr with a write barrier (see abandon_carrier).
+ *
+                * We need a matching read barrier to guarantee a correct view
+ * of the carrier for deallocation work.
+ */
+ act = erts_atomic_cmpxchg_rb(&crr->allctr,
+ iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
+ iallctr);
+ }
+ else {
+ act = erts_atomic_cmpxchg_ddrb(&crr->allctr,
+ iallctr|ERTS_CRR_ALCTR_FLG_BUSY,
+ iallctr);
+ }
if (act == iallctr) {
*busy_pcrr_pp = crr;
break;
@@ -1751,13 +1788,6 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep,
erts_mtx_unlock(&pref_allctr->mutex);
}
}
-
- ERTS_ALC_CPOOL_ASSERT(
- (((iallctr & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t) pref_allctr)
- ? (((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL)
- || ((iallctr & ERTS_CRR_ALCTR_FLG_MASK) == 0))
- : 1));
-
return used_allctr;
}
}
@@ -2009,9 +2039,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
/* Carrier migrated; need to redirect block to new owner... */
int cinit = used_allctr->dd.ix - allctr->dd.ix;
- ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
- DEC_CC(allctr->calls.this_free);
+ DEC_CC(allctr->calls.this_free);
((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type;
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
@@ -2020,8 +2050,9 @@ handle_delayed_fix_dealloc(Allctr_t *allctr, void *ptr)
}
}
-static void
-schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr);
+static void schedule_dealloc_carrier(Allctr_t*, Carrier_t*);
+static void dealloc_my_carrier(Allctr_t*, Carrier_t*);
+
static ERTS_INLINE int
handle_delayed_dealloc(Allctr_t *allctr,
@@ -2083,39 +2114,61 @@ handle_delayed_dealloc(Allctr_t *allctr,
res = 1;
blk = UMEM2BLK(ptr);
- if (IS_FREE_LAST_MBC_BLK(blk)) {
+ if (blk->bhdr == HOMECOMING_MBC_BLK_HDR) {
/*
* A multiblock carrier that previously has been migrated away
- * from us and now is back to be deallocated. For more info
- * see schedule_dealloc_carrier().
- *
- * Note that we cannot use FBLK_TO_MBC(blk) since it
- * data has been overwritten by the queue.
+            * from us was sent back to us, either because
+            * - it became empty and we need to deallocate it, or
+            * - it was inserted into the pool and we need to update our pooled_tree.
*/
- Carrier_t *crr = FIRST_BLK_TO_MBC(allctr, blk);
-
- /* Restore word overwritten by the dd-queue as it will be read
- * if this carrier is pulled from dc_list by cpool_fetch()
- */
- ERTS_ALC_CPOOL_ASSERT(FBLK_TO_MBC(blk) != crr);
- ERTS_CT_ASSERT(sizeof(ErtsAllctrDDBlock_t) == sizeof(void*));
-#ifdef MBC_ABLK_OFFSET_BITS
- blk->u.carrier = crr;
-#else
- blk->carrier = crr;
-#endif
+ Carrier_t *crr = ErtsContainerStruct(blk, Carrier_t,
+ cpool.homecoming_dd.blk);
+ Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
+ erts_aint_t iallctr;
ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr));
ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
- ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- != (erts_smp_atomic_read_nob(&crr->allctr)
- & ~ERTS_CRR_ALCTR_FLG_MASK));
-
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
- schedule_dealloc_carrier(allctr, crr);
+ iallctr = erts_atomic_read_nob(&crr->allctr);
+ ASSERT(iallctr & ERTS_CRR_ALCTR_FLG_HOMECOMING);
+ while (1) {
+ if ((iallctr & (~ERTS_CRR_ALCTR_FLG_MASK |
+ ERTS_CRR_ALCTR_FLG_IN_POOL))
+ == (erts_aint_t)allctr) {
+ /*
+ * Carrier is home (mine and not in pool)
+ */
+ ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
+ erts_atomic_set_nob(&crr->allctr, (erts_aint_t)allctr);
+ if (IS_FREE_LAST_MBC_BLK(first_blk))
+ dealloc_my_carrier(allctr, crr);
+ else
+ ASSERT(crr->cpool.state == ERTS_MBC_IS_HOME);
+ }
+ else {
+ erts_aint_t exp = iallctr;
+ erts_aint_t want = iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING;
+
+ iallctr = erts_atomic_cmpxchg_nob(&crr->allctr,
+ want,
+ exp);
+ if (iallctr != exp)
+ continue; /* retry */
+
+ ASSERT(crr->cpool.state != ERTS_MBC_IS_HOME);
+ unlink_abandoned_carrier(crr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL)
+ poolify_my_carrier(allctr, crr);
+ else
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
+ }
+ break;
+ }
}
else {
+ ASSERT(IS_SBC_BLK(blk) || (ABLK_TO_MBC(blk) !=
+ ErtsContainerStruct(blk, Carrier_t,
+ cpool.homecoming_dd.blk)));
INC_CC(allctr->calls.this_free);
@@ -2157,22 +2210,26 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
-#endif
-
-#ifdef ERTS_SMP
-static void
-set_new_allctr_abandon_limit(Allctr_t *allctr);
-static void
-abandon_carrier(Allctr_t *allctr, Carrier_t *crr);
-
+static ERTS_INLINE void
+update_pooled_tree(Allctr_t *allctr, Carrier_t *crr, Uint blk_sz)
+{
+ if (allctr == crr->cpool.orig_allctr && crr->cpool.state == ERTS_MBC_WAS_POOLED) {
+ /*
+ * Update pooled_tree with a potentially new (larger) max_sz
+ */
+ AOFF_RBTree_t* crr_node = &crr->cpool.pooled;
+ if (blk_sz > crr_node->hdr.bhdr) {
+ crr_node->hdr.bhdr = blk_sz;
+ erts_aoff_larger_max_size(crr_node);
+ }
+ }
+}
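/* Invariant relied upon above: crr->cpool.pooled sits in the owner's
 * pooled_tree keyed on the carrier's largest free block size (hdr.bhdr),
 * so growing that free block must raise the key and let
 * erts_aoff_larger_max_size() repair the max-size ordering upwards. */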
static ERTS_INLINE void
check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
{
Carrier_t *crr;
-
- if (busy_pcrr_pp && *busy_pcrr_pp)
- return;
+ UWord ncrr_in_pool, largest_fblk;
if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
return;
@@ -2181,8 +2238,7 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
if (--allctr->cpool.check_limit_count <= 0)
set_new_allctr_abandon_limit(allctr);
- if (!erts_thr_progress_is_managed_thread())
- return;
+ ASSERT(erts_thr_progress_is_managed_thread());
if (allctr->cpool.disable_abandon)
return;
@@ -2190,6 +2246,9 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
if (allctr->mbcs.blocks.curr.size > allctr->cpool.abandon_limit)
return;
+ ncrr_in_pool = erts_atomic_read_nob(&allctr->cpool.stat.no_carriers);
+ if (ncrr_in_pool >= allctr->cpool.in_pool_limit)
+ return;
crr = FBLK_TO_MBC(fblk);
@@ -2200,9 +2259,14 @@ check_abandon_carrier(Allctr_t *allctr, Block_t *fblk, Carrier_t **busy_pcrr_pp)
return;
if (crr->cpool.thr_prgr != ERTS_THR_PRGR_INVALID
- && !erts_thr_progress_has_reached(crr->cpool.thr_prgr))
- return;
+ && !erts_thr_progress_has_reached(crr->cpool.thr_prgr))
+ return;
+
+ largest_fblk = allctr->largest_fblk_in_mbc(allctr, crr);
+ if (largest_fblk < allctr->cpool.fblk_min_limit)
+ return;
+ erts_atomic_set_nob(&crr->cpool.max_size, largest_fblk);
abandon_carrier(allctr, crr);
}
@@ -2221,7 +2285,6 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
thr_prgr_p,
more_work);
}
-#endif
#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
handle_delayed_dealloc((Allctr), (Locked), 1, \
@@ -2232,29 +2295,24 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
{
Block_t *blk = UMEM2BLK(ptr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
if (IS_SBC_BLK(blk)) {
destroy_carrier(allctr, blk, NULL);
-#ifdef ERTS_SMP
if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
if (!(type & ERTS_ALC_FIX_NO_UNUSE))
fix->u.cpool.used--;
fix->u.cpool.allocated--;
}
-#endif
}
-#ifndef ERTS_SMP
- else
- mbc_free(allctr, ptr, NULL);
-#else
else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbc_free(allctr, ptr, NULL);
else {
Carrier_t *busy_pcrr_p;
Allctr_t *used_allctr;
+
used_allctr = get_used_allctr(allctr, ERTS_ALC_TS_PREF_LOCK_NO, ptr,
NULL, &busy_pcrr_p);
if (used_allctr == allctr) {
@@ -2271,15 +2329,14 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
/* Carrier migrated; need to redirect block to new owner... */
int cinit = used_allctr->dd.ix - allctr->dd.ix;
- ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
+ ERTS_ALC_CPOOL_ASSERT(!busy_pcrr_p);
- if (dec_cc_on_redirect)
- DEC_CC(allctr->calls.this_free);
+ if (dec_cc_on_redirect)
+ DEC_CC(allctr->calls.this_free);
if (ddq_enqueue(&used_allctr->dd.q, ptr, cinit))
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
}
-#endif
}
/* Multi block carrier alloc/realloc/free ... */
@@ -2519,17 +2576,16 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(IS_MBC_BLK(blk));
- if (is_first_blk
- && is_last_blk
- && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) {
- destroy_carrier(allctr, blk, busy_pcrr_pp);
+ if (is_first_blk && is_last_blk && crr != allctr->main_carrier) {
+ destroy_carrier(allctr, blk, busy_pcrr_pp);
}
else {
(*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
- check_abandon_carrier(allctr, blk, busy_pcrr_pp);
-#endif
+ if (busy_pcrr_pp && *busy_pcrr_pp)
+ update_pooled_tree(allctr, crr, blk_sz);
+ else
+ check_abandon_carrier(allctr, blk, busy_pcrr_pp);
}
}
@@ -2563,10 +2619,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
return NULL;
#else /* !MBC_REALLOC_ALWAYS_MOVES */
-#ifdef ERTS_SMP
- if (busy_pcrr_pp && *busy_pcrr_pp)
- goto realloc_move; /* Don't want to use carrier in pool */
-#endif
+ if (busy_pcrr_pp && *busy_pcrr_pp) {
+ /*
+ * Don't want to use carrier in pool
+ */
+ new_p = mbc_alloc(allctr, size);
+ if (!new_p)
+ return NULL;
+ new_blk = UMEM2BLK(new_p);
+ ASSERT(!(IS_MBC_BLK(new_blk) && ABLK_TO_MBC(new_blk) == *busy_pcrr_pp));
+ sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
+ mbc_free(allctr, p, busy_pcrr_pp);
+ return new_p;
+ }
get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
@@ -2687,9 +2752,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
check_abandon_carrier(allctr, nxt_blk, NULL);
-#endif
return p;
}
@@ -2801,9 +2864,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
if (cand_blk_sz < get_blk_sz) {
/* We wont fit in cand_blk get a new one */
-#ifdef ERTS_SMP
- realloc_move:
-#endif
+
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
new_p = mbc_alloc(allctr, size);
@@ -2905,11 +2966,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
}
-#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
-#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
-#define ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT 10
+#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 100
#define ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT 100
#define ERTS_ALC_CPOOL_MAX_FAILED_STAT_READS 3
@@ -3073,19 +3132,18 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
ErtsAlcCPoolData_t *cpd1p, *cpd2p;
erts_aint_t val;
ErtsAlcCPoolData_t *sentinel = &carrier_pool[allctr->alloc_no].sentinel;
+ Allctr_t *orig_allctr = crr->cpool.orig_allctr;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
- == (erts_aint_t) allctr);
- erts_atomic_add_nob(&allctr->cpool.stat.blocks_size,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.blocks_size,
(erts_aint_t) crr->cpool.blocks_size);
- erts_atomic_add_nob(&allctr->cpool.stat.no_blocks,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.no_blocks,
(erts_aint_t) crr->cpool.blocks);
- erts_atomic_add_nob(&allctr->cpool.stat.carriers_size,
+ erts_atomic_add_nob(&orig_allctr->cpool.stat.carriers_size,
(erts_aint_t) CARRIER_SZ(crr));
- erts_atomic_inc_nob(&allctr->cpool.stat.no_carriers);
+ erts_atomic_inc_nob(&orig_allctr->cpool.stat.no_carriers);
/*
* We search in 'next' direction and begin by passing
@@ -3146,8 +3204,6 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr)
(erts_aint_t) &crr->cpool,
(erts_aint_t) cpd1p);
- erts_smp_atomic_set_wb(&crr->allctr,
- ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL);
LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr));
}
@@ -3249,130 +3305,126 @@ cpool_delete(Allctr_t *allctr, Allctr_t *prev_allctr, Carrier_t *crr)
static Carrier_t *
cpool_fetch(Allctr_t *allctr, UWord size)
{
- int i, i_stop, has_passed_sentinel;
+ enum { IGNORANT, HAS_SEEN_SENTINEL, THE_LAST_ONE } loop_state;
+ int i;
Carrier_t *crr;
+ Carrier_t *reinsert_crr = NULL;
ErtsAlcCPoolData_t *cpdp;
- ErtsAlcCPoolData_t *cpool_entrance;
+ ErtsAlcCPoolData_t *cpool_entrance = NULL;
ErtsAlcCPoolData_t *sentinel;
- ErtsDoubleLink_t* dl;
- ErtsDoubleLink_t* first_old_traitor;
ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */
|| erts_thr_progress_is_managed_thread());
i = ERTS_ALC_CPOOL_MAX_FETCH_INSPECT;
- first_old_traitor = allctr->cpool.traitor_list.next;
- cpool_entrance = NULL;
LTTNG3(carrier_pool_get, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, (unsigned long)size);
/*
- * Search my own pooled_list,
+ * Search my own pooled_tree,
* i.e my abandoned carriers that were in the pool last time I checked.
*/
+ do {
+ erts_aint_t exp, act;
+
+ crr = aoff_lookup_pooled_mbc(allctr, size);
+ if (!crr)
+ break;
+
+ ASSERT(crr->cpool.state == ERTS_MBC_WAS_POOLED);
+ ASSERT(crr->cpool.orig_allctr == allctr);
+
+ aoff_remove_pooled_mbc(allctr, crr);
+
+ exp = erts_atomic_read_nob(&crr->allctr);
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ ASSERT((exp & ~ERTS_CRR_ALCTR_FLG_MASK) == (erts_aint_t)allctr);
+ if (erts_atomic_read_nob(&crr->cpool.max_size) < size) {
+ /*
+ * This carrier has been fetched and inserted back again
+ * by a foreign allocator. That's why it has a stale search size.
+ */
+ ASSERT(exp & ERTS_CRR_ALCTR_FLG_HOMECOMING);
+ crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
+ aoff_add_pooled_mbc(allctr, crr);
+ INC_CC(allctr->cpool.stat.skip_size);
+ continue;
+ }
+ else if (exp & ERTS_CRR_ALCTR_FLG_BUSY) {
+ /*
+ * This must be our own carrier as part of a realloc call.
+ * Skip it to make things simpler.
+ * Must wait to re-insert to not be found again by lookup.
+ */
+ ASSERT(!reinsert_crr);
+ reinsert_crr = crr;
+ INC_CC(allctr->cpool.stat.skip_busy);
+ continue;
+ }
+
+ /* Try to fetch it... */
+ act = erts_atomic_cmpxchg_mb(&crr->allctr,
+ exp & ~ERTS_CRR_ALCTR_FLG_IN_POOL,
+ exp);
+ if (act == exp) {
+ cpool_delete(allctr, allctr, crr);
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+
+ if (reinsert_crr)
+ aoff_add_pooled_mbc(allctr, reinsert_crr);
+ return crr;
+ }
+ exp = act;
+ INC_CC(allctr->cpool.stat.skip_race);
+ }
+ else
+ INC_CC(allctr->cpool.stat.skip_not_pooled);
- dl = allctr->cpool.pooled_list.next;
- while(dl != &allctr->cpool.pooled_list) {
- erts_aint_t exp, act;
- crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
-
- ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl));
- ASSERT(crr->cpool.orig_allctr == allctr);
- dl = dl->next;
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL
- && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
- if (act == exp) {
- cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
- unlink_abandoned_carrier(crr);
-
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.pooled_list);
- return crr;
- }
- exp = act;
- }
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!cpool_entrance)
- cpool_entrance = &crr->cpool;
- }
- else { /* Not in pool, move to traitor_list */
- unlink_abandoned_carrier(crr);
- link_abandoned_carrier(&allctr->cpool.traitor_list, crr);
- }
- if (--i <= 0) {
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.pooled_list);
- return NULL;
- }
- }
-
- /* Now search traitor_list.
- * i.e carriers employed by other allocators last time I checked.
- * They might have been abandoned since then.
- */
+ /* Not in pool anymore */
+ ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
- i_stop = (i < ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT ?
- 0 : i - ERTS_ALC_CPOOL_MAX_TRAITOR_INSPECT);
- dl = first_old_traitor;
- while(dl != &allctr->cpool.traitor_list) {
- erts_aint_t exp, act;
- crr = (Carrier_t *) (((char *) dl) - offsetof(Carrier_t, cpool.abandoned));
- ASSERT(dl != &allctr->cpool.pooled_list);
- ASSERT(crr->cpool.orig_allctr == allctr);
- dl = dl->next;
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY)
- && erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
- if (act == exp) {
- cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
- unlink_abandoned_carrier(crr);
+    } while (--i > 0);
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.traitor_list);
- return crr;
- }
- exp = act;
- }
- if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
- if (!cpool_entrance)
- cpool_entrance = &crr->cpool;
+ if (reinsert_crr)
+ aoff_add_pooled_mbc(allctr, reinsert_crr);
- /* Move to pooled_list */
- unlink_abandoned_carrier(crr);
- link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
- }
- }
- if (--i <= i_stop) {
- /* Move sentinel to continue next search from here */
- relink_edl_before(dl, &allctr->cpool.traitor_list);
- if (i > 0)
- break;
- else
- return NULL;
- }
+ /*
+ * Try find a nice cpool_entrance
+ */
+ while (allctr->cpool.pooled_tree) {
+ erts_aint_t iallctr;
+
+ crr = ErtsContainerStruct(allctr->cpool.pooled_tree, Carrier_t, cpool.pooled);
+ iallctr = erts_atomic_read_nob(&crr->allctr);
+ if (iallctr & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ cpool_entrance = &crr->cpool;
+ break;
+ }
+ /* Not in pool anymore */
+ ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY));
+ aoff_remove_pooled_mbc(allctr, crr);
+ crr->cpool.state = ERTS_MBC_WAS_TRAITOR;
+
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_pooled);
+ return NULL;
+ }
}
+
/*
* Finally search the shared pool and try employ foreign carriers
*/
-
sentinel = &carrier_pool[allctr->alloc_no].sentinel;
if (cpool_entrance) {
- /* We saw a pooled carried above, use it as entrance into the pool
+ /*
+     * We saw a pooled carrier above, use it as entrance into the pool
*/
cpdp = cpool_entrance;
}
else {
- /* No pooled carried seen above. Start search at cpool sentinel,
+ /*
+     * No pooled carrier seen above. Start search at cpool sentinel,
* but begin by passing one element before trying to fetch.
* This in order to avoid contention with threads inserting elements.
*/
@@ -3382,8 +3434,8 @@ cpool_fetch(Allctr_t *allctr, UWord size)
goto check_dc_list;
}
- has_passed_sentinel = 0;
- while (1) {
+ loop_state = IGNORANT;
+ do {
erts_aint_t exp;
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == cpool_entrance) {
@@ -3392,38 +3444,52 @@ cpool_fetch(Allctr_t *allctr, UWord size)
if (cpdp == sentinel)
break;
}
- i = 0; /* Last one to inspect */
+ loop_state = THE_LAST_ONE;
}
else if (cpdp == sentinel) {
- if (has_passed_sentinel) {
+ if (loop_state == HAS_SEEN_SENTINEL) {
            /* We've been here before. cpool_entrance must have been removed */
+ INC_CC(allctr->cpool.stat.entrance_removed);
break;
}
cpdp = cpool_aint2cpd(cpool_read(&cpdp->prev));
if (cpdp == sentinel)
break;
- has_passed_sentinel = 1;
+ loop_state = HAS_SEEN_SENTINEL;
}
- crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool));
- exp = erts_smp_atomic_read_rb(&crr->allctr);
- if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL)
- && (erts_atomic_read_nob(&cpdp->max_size) >= size)) {
+ crr = ErtsContainerStruct(cpdp, Carrier_t, cpool);
+ exp = erts_atomic_read_rb(&crr->allctr);
+
+ if (erts_atomic_read_nob(&cpdp->max_size) < size) {
+ INC_CC(allctr->cpool.stat.skip_size);
+ }
+ else if ((exp & (ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY))
+ == ERTS_CRR_ALCTR_FLG_IN_POOL) {
erts_aint_t act;
- /* Try to fetch it... */
- act = erts_smp_atomic_cmpxchg_mb(&crr->allctr,
- (erts_aint_t) allctr,
- exp);
+ erts_aint_t want = (((erts_aint_t) allctr)
+ | (exp & ERTS_CRR_ALCTR_FLG_HOMECOMING));
+ /* Try to fetch it... */
+ act = erts_atomic_cmpxchg_mb(&crr->allctr, want, exp);
if (act == exp) {
cpool_delete(allctr, ((Allctr_t *) (act & ~ERTS_CRR_ALCTR_FLG_MASK)), crr);
if (crr->cpool.orig_allctr == allctr) {
unlink_abandoned_carrier(crr);
- }
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+ }
return crr;
}
}
- if (--i <= 0)
+
+ if (exp & ERTS_CRR_ALCTR_FLG_BUSY)
+ INC_CC(allctr->cpool.stat.skip_busy);
+ else
+ INC_CC(allctr->cpool.stat.skip_race);
+
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_shared);
return NULL;
- }
+ }
+ } while (loop_state != THE_LAST_ONE);
check_dc_list:
/* Last; check our own pending dealloc carrier list... */
@@ -3432,23 +3498,23 @@ check_dc_list:
if (erts_atomic_read_nob(&crr->cpool.max_size) >= size) {
Block_t* blk;
unlink_carrier(&allctr->cpool.dc_list, crr);
-#ifdef ERTS_ALC_CPOOL_DEBUG
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr,
- ((erts_aint_t) allctr))
- == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK));
-#else
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
-#endif
+ ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr)
+ == ((erts_aint_t) allctr));
blk = MBC_TO_FIRST_BLK(allctr, crr);
ASSERT(FBLK_TO_MBC(blk) == crr);
allctr->link_free_block(allctr, blk);
return crr;
}
crr = crr->prev;
- if (--i <= 0)
+ if (--i <= 0) {
+ INC_CC(allctr->cpool.stat.fail_pend_dealloc);
return NULL;
+ }
}
+ if (i != ERTS_ALC_CPOOL_MAX_FETCH_INSPECT)
+ INC_CC(allctr->cpool.stat.fail);
+
return NULL;
}
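The fetch path above hinges on packing ownership and state into one atomic word: crr->allctr holds an aligned allocator pointer whose low bits carry the IN_POOL/BUSY/HOMECOMING flags, so a foreign scheduler can claim a carrier with a single compare-and-swap while leaving HOMECOMING intact. A minimal standalone C11 model of that claim (hypothetical model_* names, not the ERTS types):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_IN_POOL    ((uintptr_t)1 << 0) /* carrier is in the shared pool */
    #define FLG_BUSY       ((uintptr_t)1 << 1) /* a scanner holds it right now */
    #define FLG_HOMECOMING ((uintptr_t)1 << 2) /* a dd-queue entry is in flight */

    typedef struct {
        _Atomic uintptr_t allctr; /* aligned owner pointer | flag bits */
    } model_carrier_t;

    /* Try to claim crr for `self`; returns 1 on success. Mirrors the
     * exp/want/act dance in cpool_fetch(): only carriers that are in the
     * pool and not busy are taken, and HOMECOMING is preserved so any
     * in-flight homecoming message stays valid. */
    static int model_try_fetch(model_carrier_t *crr, void *self)
    {
        uintptr_t exp = atomic_load_explicit(&crr->allctr, memory_order_acquire);
        uintptr_t want;

        if ((exp & (FLG_IN_POOL | FLG_BUSY)) != FLG_IN_POOL)
            return 0; /* not pooled, or temporarily busy: skip, don't block */

        want = (uintptr_t)self | (exp & FLG_HOMECOMING);
        return atomic_compare_exchange_strong(&crr->allctr, &exp, want);
    }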
@@ -3503,9 +3569,6 @@ static void
schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
{
Allctr_t *orig_allctr;
- Block_t *blk;
- int check_pending_dealloc;
- erts_aint_t max_size;
ASSERT(IS_MB_CARRIER(crr));
@@ -3516,9 +3579,17 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
orig_allctr = crr->cpool.orig_allctr;
- if (allctr != orig_allctr) {
- int cinit = orig_allctr->dd.ix - allctr->dd.ix;
-
+ if (allctr == orig_allctr) {
+ if (!(erts_atomic_read_nob(&crr->allctr) & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+ dealloc_my_carrier(allctr, crr);
+ }
+ /* else:
+ * The carrier was abandoned earlier by another thread and
+ * is still waiting for us in the dd-queue.
+ * handle_delayed_dealloc() will handle it when crr is dequeued.
+ */
+ }
+ else {
/*
* We send the carrier to its origin for deallocation.
* This is in order:
@@ -3527,29 +3598,39 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
* - to ensure that we always only reuse empty carriers
* originating from our own thread-specific mseg_alloc
* instance, which is beneficial on NUMA systems.
- *
- * The receiver will recognize that this is a carrier to
- * deallocate (and not a block which is the common case)
- * since the block is an mbc block that is free and last
- * in the carrier.
*/
- blk = MBC_TO_FIRST_BLK(allctr, crr);
- ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(blk));
+ erts_aint_t iallctr;
+#ifdef ERTS_ALC_CPOOL_DEBUG
+ Block_t* first_blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ERTS_ALC_CPOOL_ASSERT(IS_FREE_LAST_MBC_BLK(first_blk));
- ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, blk));
- ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk));
- ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk));
- ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr)
- == (erts_smp_atomic_read_nob(&crr->allctr)
- & ~ERTS_CRR_ALCTR_FLG_MASK));
+ ERTS_ALC_CPOOL_ASSERT(IS_MBC_FIRST_ABLK(allctr, first_blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(first_blk));
+ ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, first_blk));
+ ERTS_ALC_CPOOL_ASSERT((erts_atomic_read_nob(&crr->allctr)
+ & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == (erts_aint_t) allctr);
+#endif
- if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit))
- erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
- return;
+ iallctr = (erts_aint_t)orig_allctr | ERTS_CRR_ALCTR_FLG_HOMECOMING;
+ if (!(erts_atomic_xchg_nob(&crr->allctr, iallctr)
+ & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+ enqueue_homecoming(allctr, crr);
+ }
}
+}
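Both branches above preserve one invariant: exactly the thread that flips HOMECOMING from 0 to 1 enqueues the carrier on its owner's delayed-dealloc queue, so at most one homecoming message per carrier can be in flight. A hedged sketch of that hand-off, with a hypothetical callback standing in for enqueue_homecoming():

    #include <stdatomic.h>
    #include <stdint.h>

    #define MODEL_FLG_HOMECOMING ((uintptr_t)1 << 2)

    /* Point the carrier at its owner and raise HOMECOMING in one atomic
     * exchange; whoever observed the bit as clear wins the right (and the
     * duty) to enqueue exactly one message. */
    static void model_send_home(_Atomic uintptr_t *allctr_word, uintptr_t owner,
                                void (*enqueue_cb)(void *), void *arg)
    {
        uintptr_t prev = atomic_exchange(allctr_word,
                                         owner | MODEL_FLG_HOMECOMING);

        if (!(prev & MODEL_FLG_HOMECOMING))
            enqueue_cb(arg); /* we made the 0 -> 1 transition */
        /* else: already queued; the owner's handle_delayed_dealloc()
         * equivalent will see the carrier when it drains its queue. */
    }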
+
+static void dealloc_my_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ Block_t *blk;
+ int check_pending_dealloc;
+ erts_aint_t max_size;
- if (is_abandoned(crr))
- unlink_abandoned_carrier(crr);
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+ if (is_abandoned(crr)) {
+ unlink_abandoned_carrier(crr);
+ crr->cpool.state = ERTS_MBC_IS_HOME;
+ }
if (crr->cpool.thr_prgr == ERTS_THR_PRGR_INVALID
|| erts_thr_progress_has_reached(crr->cpool.thr_prgr)) {
@@ -3581,6 +3662,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr)
static ERTS_INLINE void
cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
{
+ crr->cpool.homecoming_dd.blk.bhdr = HOMECOMING_MBC_BLK_HDR;
erts_atomic_init_nob(&crr->cpool.next, ERTS_AINT_NULL);
erts_atomic_init_nob(&crr->cpool.prev, ERTS_AINT_NULL);
crr->cpool.orig_allctr = allctr;
@@ -3599,8 +3681,7 @@ cpool_init_carrier_data(Allctr_t *allctr, Carrier_t *crr)
limit = (csz/100)*allctr->cpool.util_limit;
crr->cpool.abandon_limit = limit;
}
- crr->cpool.abandoned.next = NULL;
- crr->cpool.abandoned.prev = NULL;
+ crr->cpool.state = ERTS_MBC_IS_HOME;
}
static void
@@ -3626,23 +3707,62 @@ set_new_allctr_abandon_limit(Allctr_t *allctr)
static void
abandon_carrier(Allctr_t *allctr, Carrier_t *crr)
{
- erts_aint_t max_size;
+ erts_aint_t iallctr;
- STAT_MBC_CPOOL_INSERT(allctr, crr);
+ STAT_MBC_ABANDON(allctr, crr);
unlink_carrier(&allctr->mbc_list, crr);
- if (crr->cpool.orig_allctr == allctr) {
- link_abandoned_carrier(&allctr->cpool.pooled_list, crr);
+ allctr->remove_mbc(allctr, crr);
+ set_new_allctr_abandon_limit(allctr);
+
+ cpool_insert(allctr, crr);
+
+ iallctr = erts_atomic_read_nob(&crr->allctr);
+ if (allctr == crr->cpool.orig_allctr) {
+ /* preserve HOMECOMING flag */
+ ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
+ erts_atomic_set_wb(&crr->allctr, iallctr | ERTS_CRR_ALCTR_FLG_IN_POOL);
+ poolify_my_carrier(allctr, crr);
}
+ else {
+ ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING) == (erts_aint_t)allctr);
+ iallctr = ((erts_aint_t)crr->cpool.orig_allctr |
+ ERTS_CRR_ALCTR_FLG_HOMECOMING |
+ ERTS_CRR_ALCTR_FLG_IN_POOL);
+ if (!(erts_atomic_xchg_wb(&crr->allctr, iallctr)
+ & ERTS_CRR_ALCTR_FLG_HOMECOMING)) {
+
+ enqueue_homecoming(allctr, crr);
+ }
+ }
+}
- allctr->remove_mbc(allctr, crr);
+static void
+enqueue_homecoming(Allctr_t* allctr, Carrier_t* crr)
+{
+ Allctr_t* orig_allctr = crr->cpool.orig_allctr;
+ const int cinit = orig_allctr->dd.ix - allctr->dd.ix;
+ Block_t* dd_blk = &crr->cpool.homecoming_dd.blk;
- max_size = (erts_aint_t) allctr->largest_fblk_in_mbc(allctr, crr);
- erts_atomic_set_nob(&crr->cpool.max_size, max_size);
+ /*
+ * The receiver will recognize this as a carrier
+ * (and not a block, which is the common case)
+ * since the block header is HOMECOMING_MBC_BLK_HDR.
+ */
+ ASSERT(dd_blk->bhdr == HOMECOMING_MBC_BLK_HDR);
+ if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(dd_blk), cinit))
+ erts_alloc_notify_delayed_dealloc(orig_allctr->ix);
+}
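On the receiving end, a delayed-dealloc entry is normally an ordinary freed block, so the reserved HOMECOMING_MBC_BLK_HDR value in the embedded homecoming_dd block is what lets the dequeuer tell a returning carrier apart without extra queue metadata. Roughly, under hypothetical model types (the real dispatch lives in handle_delayed_dealloc()):

    #include <stdint.h>

    /* Any header value that can never belong to a live block would do;
     * zero is an assumption made for this sketch only. */
    #define MODEL_HOMECOMING_HDR ((uintptr_t)0)

    typedef struct { uintptr_t bhdr; } model_blk_t;

    /* Dispatch one dequeued dd entry by inspecting its block header. */
    static void model_handle_dd_entry(model_blk_t *blk,
                                      void (*dealloc_block)(model_blk_t *),
                                      void (*carrier_came_home)(model_blk_t *))
    {
        if (blk->bhdr == MODEL_HOMECOMING_HDR)
            carrier_came_home(blk); /* rare: a whole carrier returned home */
        else
            dealloc_block(blk);     /* common case: a foreign-freed block */
    }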
- cpool_insert(allctr, crr);
+static void
+poolify_my_carrier(Allctr_t *allctr, Carrier_t *crr)
+{
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
- set_new_allctr_abandon_limit(allctr);
+ crr->cpool.pooled.hdr.bhdr = erts_atomic_read_nob(&crr->cpool.max_size);
+ aoff_add_pooled_mbc(allctr, crr);
+ crr->cpool.state = ERTS_MBC_WAS_POOLED;
}
static void
@@ -3691,7 +3811,6 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *
}
-#endif /* ERTS_SMP */
#ifdef DEBUG
@@ -3792,7 +3911,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
}
-#ifdef ERTS_SMP
allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
@@ -3801,6 +3919,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
crr = cpool_fetch(allctr, blk_sz);
if (crr) {
STAT_MBC_CPOOL_FETCH(allctr, crr);
+ INC_CC(allctr->cpool.stat.fetch);
link_carrier(&allctr->mbc_list, crr);
(*allctr->add_mbc)(allctr, crr);
blk = (*allctr->get_free_block)(allctr, blk_sz, NULL, 0);
@@ -3808,7 +3927,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
return blk;
}
}
-#endif
#if HAVE_ERTS_MSEG
@@ -3938,9 +4056,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
allctr->main_carrier = crr;
}
-#ifdef ERTS_SMP
cpool_init_carrier_data(allctr, crr);
-#endif
link_carrier(&allctr->mbc_list, crr);
@@ -4153,26 +4269,29 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
ASSERT(IS_LAST_BLK(blk));
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- (*allctr->link_free_block)(allctr, blk, 0);
+ (*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk, 0);
+ (*allctr->unlink_free_block)(allctr, blk);
#endif
}
#endif
-#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp) {
+ erts_aint_t iallctr = erts_atomic_read_nob(&crr->allctr);
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
- *busy_pcrr_pp = NULL;
- ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr)
- == (((erts_aint_t) allctr)
- | ERTS_CRR_ALCTR_FLG_IN_POOL
- | ERTS_CRR_ALCTR_FLG_BUSY));
- erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr));
+ ERTS_ALC_CPOOL_ASSERT((iallctr & ~ERTS_CRR_ALCTR_FLG_HOMECOMING)
+ == (((erts_aint_t) allctr)
+ | ERTS_CRR_ALCTR_FLG_IN_POOL
+ | ERTS_CRR_ALCTR_FLG_BUSY));
+ ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr);
+
+ *busy_pcrr_pp = NULL;
+ erts_atomic_set_nob(&crr->allctr,
+ (iallctr & ~(ERTS_CRR_ALCTR_FLG_IN_POOL |
+ ERTS_CRR_ALCTR_FLG_BUSY)));
cpool_delete(allctr, allctr, crr);
}
else
-#endif
{
unlink_carrier(&allctr->mbc_list, crr);
#if HAVE_ERTS_MSEG
@@ -4203,11 +4322,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
}
#endif
-#ifdef ERTS_SMP
schedule_dealloc_carrier(allctr, crr);
-#else
- dealloc_mbc(allctr, crr);
-#endif
}
}
@@ -4223,7 +4338,7 @@ static struct {
Eterm e;
Eterm t;
Eterm ramv;
- Eterm sbct;
+ Eterm atags;
#if HAVE_ERTS_MSEG
Eterm asbcst;
Eterm rsbcst;
@@ -4240,6 +4355,8 @@ static struct {
Eterm smbcs;
Eterm mbcgs;
Eterm acul;
+ Eterm acnl;
+ Eterm acfml;
#if HAVE_ERTS_MSEG
Eterm mmc;
@@ -4250,9 +4367,18 @@ static struct {
Eterm fix_types;
Eterm mbcs;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
+ Eterm fetch;
+ Eterm fail_pooled;
+ Eterm fail_shared;
+ Eterm fail_pend_dealloc;
+ Eterm fail;
+ Eterm skip_size;
+ Eterm skip_busy;
+ Eterm skip_not_pooled;
+ Eterm skip_homecoming;
+ Eterm skip_race;
+ Eterm entrance_removed;
Eterm sbcs;
Eterm sys_alloc_carriers_size;
@@ -4282,11 +4408,11 @@ static struct {
#endif
} am;
-static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES];
+static Eterm alloc_type_atoms[ERTS_ALC_N_MAX + 1];
static ERTS_INLINE void atom_init(Eterm *atom, char *name)
{
- *atom = am_atom_put(name, strlen(name));
+ *atom = am_atom_put(name, sys_strlen(name));
}
#define AM_INIT(AM) atom_init(&am.AM, #AM)
@@ -4313,7 +4439,7 @@ init_atoms(Allctr_t *allctr)
AM_INIT(e);
AM_INIT(t);
AM_INIT(ramv);
- AM_INIT(sbct);
+ AM_INIT(atags);
#if HAVE_ERTS_MSEG
AM_INIT(asbcst);
AM_INIT(rsbcst);
@@ -4330,6 +4456,8 @@ init_atoms(Allctr_t *allctr)
AM_INIT(smbcs);
AM_INIT(mbcgs);
AM_INIT(acul);
+ AM_INIT(acnl);
+ AM_INIT(acfml);
#if HAVE_ERTS_MSEG
AM_INIT(mmc);
@@ -4340,9 +4468,18 @@ init_atoms(Allctr_t *allctr)
AM_INIT(fix_types);
AM_INIT(mbcs);
-#ifdef ERTS_SMP
AM_INIT(mbcs_pool);
-#endif
+ AM_INIT(fetch);
+ AM_INIT(fail_pooled);
+ AM_INIT(fail_shared);
+ AM_INIT(fail_pend_dealloc);
+ AM_INIT(fail);
+ AM_INIT(skip_size);
+ AM_INIT(skip_busy);
+ AM_INIT(skip_not_pooled);
+ AM_INIT(skip_homecoming);
+ AM_INIT(skip_race);
+ AM_INIT(entrance_removed);
AM_INIT(sbcs);
AM_INIT(sys_alloc_carriers_size);
@@ -4374,12 +4511,12 @@ init_atoms(Allctr_t *allctr)
}
#endif
- for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
- ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
- char *name = (char *) ERTS_ALC_N2TD(n);
- size_t len = strlen(name);
- fix_type_atoms[ix] = am_atom_put(name, len);
- }
+ for (ix = ERTS_ALC_N_MIN; ix <= ERTS_ALC_N_MAX; ix++) {
+ const char *name = ERTS_ALC_N2TD(ix);
+ size_t len = sys_strlen(name);
+
+ alloc_type_atoms[ix] = am_atom_put(name, len);
+ }
}
if (allctr && !allctr->atoms_initialized) {
@@ -4473,7 +4610,7 @@ add_fix_types(Allctr_t *allctr, int internal, Uint **hpp, Uint *szp,
static Eterm
sz_info_fix(Allctr_t *allctr,
int internal,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4492,22 +4629,22 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.cpool.allocated;
UWord used = fix->type_size * fix->u.cpool.used;
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
"fix type internal: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
- + ix),
+ (char *) ERTS_ALC_N2TD(n),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- fix_type_atoms[ix],
+ alloc_type_atoms[n],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -4520,22 +4657,22 @@ sz_info_fix(Allctr_t *allctr,
ErtsAlcFixList_t *fix = &allctr->fix[ix];
UWord alloced = fix->type_size * fix->u.nocpool.allocated;
UWord used = fix->type_size*fix->u.nocpool.used;
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
"fix type: %s %bpu %bpu\n",
- (char *) ERTS_ALC_N2TD(ERTS_ALC_N_MIN_A_FIXED_SIZE
- + ix),
+ (char *) ERTS_ALC_N2TD(n),
alloced,
used);
}
if (hpp || szp) {
add_3tup(hpp, szp, &res,
- fix_type_atoms[ix],
+ alloc_type_atoms[n],
bld_unstable_uint(hpp, szp, alloced),
bld_unstable_uint(hpp, szp, used));
}
@@ -4548,7 +4685,7 @@ static Eterm
sz_info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4557,7 +4694,7 @@ sz_info_carriers(Allctr_t *allctr,
UWord curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -4592,13 +4729,12 @@ sz_info_carriers(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
static Eterm
info_cpool(Allctr_t *allctr,
int sz_only,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4615,7 +4751,7 @@ info_cpool(Allctr_t *allctr,
}
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
if (!sz_only)
erts_print(to, arg, "%sblocks: %bpu\n", prefix, nob);
@@ -4627,9 +4763,56 @@ info_cpool(Allctr_t *allctr,
if (hpp || szp) {
res = NIL;
+
+ if (!sz_only) {
+ add_3tup(hpp, szp, &res, am.fail_pooled,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pooled)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pooled)));
+
+ add_3tup(hpp, szp, &res, am.fail_shared,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_shared)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_shared)));
+
+ add_3tup(hpp, szp, &res, am.fail_pend_dealloc,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail_pend_dealloc)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail_pend_dealloc)));
+
+ add_3tup(hpp, szp, &res, am.fail,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fail)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fail)));
+
+ add_3tup(hpp, szp, &res, am.fetch,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.fetch)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.fetch)));
+
+ add_3tup(hpp, szp, &res, am.skip_size,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_size)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_size)));
+
+ add_3tup(hpp, szp, &res, am.skip_busy,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_busy)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_busy)));
+
+ add_3tup(hpp, szp, &res, am.skip_not_pooled,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_not_pooled)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_not_pooled)));
+
+ add_3tup(hpp, szp, &res, am.skip_homecoming,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_homecoming)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_homecoming)));
+
+ add_3tup(hpp, szp, &res, am.skip_race,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.skip_race)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.skip_race)));
+
+ add_3tup(hpp, szp, &res, am.entrance_removed,
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_GIGA_VAL(allctr->cpool.stat.entrance_removed)),
+ bld_unstable_uint(hpp, szp, ERTS_ALC_CC_VAL(allctr->cpool.stat.entrance_removed)));
+
add_2tup(hpp, szp, &res,
am.carriers_size,
bld_unstable_uint(hpp, szp, csz));
+ }
if (!sz_only)
add_2tup(hpp, szp, &res,
am.carriers,
@@ -4646,13 +4829,12 @@ info_cpool(Allctr_t *allctr,
return res;
}
-#endif /* ERTS_SMP */
static Eterm
info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4664,7 +4846,7 @@ info_carriers(Allctr_t *allctr,
curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
if (print_to_p) {
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
erts_print(to,
arg,
@@ -4770,27 +4952,27 @@ make_name_atoms(Allctr_t *allctr)
char realloc[] = "realloc";
char free[] = "free";
char buf[MAX_ATOM_CHARACTERS];
- size_t prefix_len = strlen(allctr->name_prefix);
+ size_t prefix_len = sys_strlen(allctr->name_prefix);
if (prefix_len > MAX_ATOM_CHARACTERS + sizeof(realloc) - 1)
erts_exit(ERTS_ERROR_EXIT,"Too long allocator name: %salloc\n",allctr->name_prefix);
- memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);
+ sys_memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);
- memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1);
+ sys_memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1);
allctr->name.alloc = am_atom_put(buf, prefix_len + sizeof(alloc) - 1);
- memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1);
+ sys_memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1);
allctr->name.realloc = am_atom_put(buf, prefix_len + sizeof(realloc) - 1);
- memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1);
+ sys_memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1);
allctr->name.free = am_atom_put(buf, prefix_len + sizeof(free) - 1);
}
static Eterm
info_calls(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -4807,7 +4989,7 @@ info_calls(Allctr_t *allctr,
erts_print(TO, TOA, "%s%s calls: %b64u\n",PRFX,NAME,CC)
char *prefix = allctr->name_prefix;
- int to = *print_to_p;
+ fmtfn_t to = *print_to_p;
void *arg = print_to_arg;
PRINT_CC_5(to, arg, prefix, "alloc", allctr->calls.this_alloc);
@@ -4883,13 +5065,13 @@ info_calls(Allctr_t *allctr,
static Eterm
info_options(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
- int acul;
+ UWord acul, acnl, acfml;
if (!allctr) {
if (print_to_p)
@@ -4901,11 +5083,9 @@ info_options(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
acul = allctr->cpool.util_limit;
-#else
- acul = 0;
-#endif
+ acnl = allctr->cpool.in_pool_limit;
+ acfml = allctr->cpool.fblk_min_limit;
if (print_to_p) {
char topt[21]; /* Enough for any 64-bit integer */
@@ -4918,6 +5098,7 @@ info_options(Allctr_t *allctr,
"option e: true\n"
"option t: %s\n"
"option ramv: %s\n"
+ "option atags: %s\n"
"option sbct: %beu\n"
#if HAVE_ERTS_MSEG
"option asbcst: %bpu\n"
@@ -4933,9 +5114,10 @@ info_options(Allctr_t *allctr,
"option lmbcs: %beu\n"
"option smbcs: %beu\n"
"option mbcgs: %beu\n"
- "option acul: %d\n",
+ "option acul: %bpu\n",
topt,
allctr->ramv ? "true" : "false",
+ allctr->atags ? "true" : "false",
allctr->sbc_threshold,
#if HAVE_ERTS_MSEG
allctr->mseg_opt.abs_shrink_th,
@@ -4958,9 +5140,15 @@ info_options(Allctr_t *allctr,
hpp, szp);
if (hpp || szp) {
+ add_2tup(hpp, szp, &res,
+ am.acfml,
+ bld_uint(hpp, szp, acfml));
+ add_2tup(hpp, szp, &res,
+ am.acnl,
+ bld_uint(hpp, szp, acnl));
add_2tup(hpp, szp, &res,
am.acul,
- bld_uint(hpp, szp, (UWord) acul));
+ bld_uint(hpp, szp, acul));
add_2tup(hpp, szp, &res,
am.mbcgs,
bld_uint(hpp, szp, allctr->mbc_growth_stages));
@@ -4996,9 +5184,10 @@ info_options(Allctr_t *allctr,
bld_uint(hpp, szp, allctr->mseg_opt.abs_shrink_th));
#endif
add_2tup(hpp, szp, &res,
- am.sbct,
+ am_sbct,
bld_uint(hpp, szp, allctr->sbc_threshold));
add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
+ add_2tup(hpp, szp, &res, am.atags, allctr->atags ? am_true : am_false);
add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
add_2tup(hpp, szp, &res, am.e, am_true);
}
@@ -5035,7 +5224,7 @@ reset_max_values(CarriersStats_t *cs)
\* */
Eterm
-erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
+erts_alcu_au_info_options(fmtfn_t *print_to_p, void *print_to_arg,
Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -5078,7 +5267,7 @@ erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
Eterm
erts_alcu_info_options(Allctr_t *allctr,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -5088,19 +5277,15 @@ erts_alcu_info_options(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5110,15 +5295,13 @@ Eterm
erts_alcu_sz_info(Allctr_t *allctr,
int internal,
int begin_max_period,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -5133,16 +5316,14 @@ erts_alcu_sz_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -5154,23 +5335,19 @@ erts_alcu_sz_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
}
@@ -5181,12 +5358,10 @@ erts_alcu_sz_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5196,15 +5371,13 @@ Eterm
erts_alcu_info(Allctr_t *allctr,
int internal,
int begin_max_period,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -5219,16 +5392,14 @@ erts_alcu_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
- /* Update sbc values not continously updated */
+ /* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
= allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
@@ -5249,13 +5420,11 @@ erts_alcu_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
@@ -5265,10 +5434,8 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.calls, calls);
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
add_2tup(hpp, szp, &res, am.options, sett);
@@ -5284,12 +5451,10 @@ erts_alcu_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5299,10 +5464,8 @@ void
erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
{
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
size->carriers = allctr->mbcs.curr.norm.mseg.size;
size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
@@ -5312,14 +5475,12 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
size->blocks = allctr->mbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
UWord csz, bsz;
cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
size->blocks += bsz;
size->carriers += csz;
}
-#endif
if (fi) {
int ix;
@@ -5341,25 +5502,22 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
/* ----------------------------------------------------------------------- */
static ERTS_INLINE void *
-do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
+do_erts_alcu_alloc(ErtsAlcType_t type, Allctr_t *allctr, Uint size)
{
- Allctr_t *allctr = (Allctr_t *) extra;
void *res;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -5391,41 +5549,57 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *) extra;
void *res;
-#ifdef ERTS_SMP
+
ASSERT(!"This is not thread safe");
-#elif defined(USE_THREADS)
- ASSERT(erts_equal_tids(erts_main_thread, erts_thr_self()));
-#endif
- res = do_erts_alcu_alloc(type, extra, size);
+
+ res = do_erts_alcu_alloc(type, allctr, size);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
-#ifdef USE_THREADS
void *
erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, extra, size);
- DEBUG_CHECK_ALIGNMENT(res);
+ res = do_erts_alcu_alloc(type, allctr, size);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
erts_mtx_unlock(&allctr->mutex);
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5435,11 +5609,19 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -5452,54 +5634,55 @@ void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *pref_allctr;
+ alcu_atag_t tag = 0;
void *res;
pref_allctr = get_pref_allctr(extra);
+ if (pref_allctr->atags) {
+ tag = determine_alloc_tag(pref_allctr, type);
+ }
+
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
res = do_erts_alcu_alloc(type, pref_allctr, size);
-#ifdef ERTS_SMP
if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
res = do_erts_alcu_alloc(type, pref_allctr, size);
}
-#endif
+
+ if (pref_allctr->atags && res) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
-
return res;
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void
-do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
+do_erts_alcu_free(ErtsAlcType_t type, Allctr_t *allctr, void *p,
Carrier_t **busy_pcrr_pp)
{
- Allctr_t *allctr = (Allctr_t *) extra;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -5526,21 +5709,20 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p,
void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
- do_erts_alcu_free(type, extra, p, NULL);
+ Allctr_t *allctr = (Allctr_t *) extra;
+ do_erts_alcu_free(type, allctr, p, NULL);
}
-#ifdef USE_THREADS
void
erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
{
Allctr_t *allctr = (Allctr_t *) extra;
erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
erts_mtx_unlock(&allctr->mutex);
}
-#ifdef ERTS_SMP
void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
@@ -5574,12 +5756,13 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
pref_allctr = get_pref_allctr(extra);
used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_IF_USED,
p, NULL, &busy_pcrr_p);
- if (pref_allctr != used_allctr)
+ if (pref_allctr != used_allctr) {
enqueue_dealloc_other_instance(type,
- used_allctr,
- p,
- (used_allctr->dd.ix
- - pref_allctr->dd.ix));
+ used_allctr,
+ p,
+ (used_allctr->dd.ix
+ - pref_allctr->dd.ix));
+ }
else {
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
do_erts_alcu_free(type, used_allctr, p, &busy_pcrr_p);
@@ -5590,21 +5773,18 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
}
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void *
do_erts_alcu_realloc(ErtsAlcType_t type,
- void *extra,
+ Allctr_t *allctr,
void *p,
Uint size,
Uint32 alcu_flgs,
Carrier_t **busy_pcrr_pp)
{
- Allctr_t *allctr = (Allctr_t *) extra;
Block_t *blk;
void *res;
@@ -5612,13 +5792,13 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
ASSERT(allctr);
- ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ ERTS_LC_ASSERT(!allctr->thread_safe
|| erts_lc_mtx_is_locked(&allctr->mutex));
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (!p) {
- res = do_erts_alcu_alloc(type, extra, size);
+ res = do_erts_alcu_alloc(type, allctr, size);
INC_CC(allctr->calls.this_realloc);
DEC_CC(allctr->calls.this_alloc);
return res;
@@ -5627,7 +5807,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
#if ALLOC_ZERO_EQ_NULL
if (!size) {
ASSERT(p);
- do_erts_alcu_free(type, extra, p, busy_pcrr_pp);
+ do_erts_alcu_free(type, allctr, p, busy_pcrr_pp);
INC_CC(allctr->calls.this_realloc);
DEC_CC(allctr->calls.this_free);
return NULL;
@@ -5712,19 +5892,29 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
void *
erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *)extra;
void *res;
- res = do_erts_alcu_realloc(type, extra, p, size, 0, NULL);
+
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
+
DEBUG_CHECK_ALIGNMENT(res);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
return res;
}
void *
erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
+ Allctr_t *allctr = (Allctr_t *)extra;
void *res;
- res = do_erts_alcu_alloc(type, extra, size);
+
+ res = do_erts_alcu_alloc(type, allctr, size);
if (!res)
- res = erts_alcu_realloc(type, extra, p, size);
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
else {
Block_t *blk;
size_t cpy_size;
@@ -5734,24 +5924,42 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
}
+
DEBUG_CHECK_ALIGNMENT(res);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, determine_alloc_tag(allctr, type));
+ }
+
return res;
}
-#ifdef USE_THREADS
-
void *
erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_realloc(type, extra, ptr, size, 0, NULL);
+
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
@@ -5759,11 +5967,17 @@ void *
erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
+ alcu_atag_t tag = 0;
void *res;
+
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, extra, size);
+ res = do_erts_alcu_alloc(type, allctr, size);
if (!res)
- res = erts_alcu_realloc_ts(type, extra, p, size);
+ res = do_erts_alcu_realloc(type, allctr, p, size, 0, NULL);
else {
Block_t *blk;
size_t cpy_size;
@@ -5773,14 +5987,20 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, extra, p, NULL);
+ do_erts_alcu_free(type, allctr, p, NULL);
}
+
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
@@ -5788,6 +6008,7 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5797,11 +6018,19 @@ erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -5816,6 +6045,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int ix;
+ alcu_atag_t tag = 0;
Allctr_t *allctr;
void *res;
@@ -5825,14 +6055,16 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
allctr = tspec->allctr[ix];
+ if (allctr->atags) {
+ tag = determine_alloc_tag(allctr, type);
+ }
+
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
if (!res) {
- if (allctr->thread_safe)
- erts_mtx_unlock(&allctr->mutex);
- res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0, NULL);
}
else {
Block_t *blk;
@@ -5844,41 +6076,42 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
cpy_size = size;
sys_memcpy(res, ptr, cpy_size);
do_erts_alcu_free(type, allctr, ptr, NULL);
- if (allctr->thread_safe)
- erts_mtx_unlock(&allctr->mutex);
}
+ if (allctr->atags && res) {
+ set_alloc_tag(allctr, res, tag);
+ }
+
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+
DEBUG_CHECK_ALIGNMENT(res);
return res;
}
static ERTS_INLINE void *
-realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
+realloc_thr_pref(ErtsAlcType_t type, Allctr_t *pref_allctr, void *p, Uint size,
int force_move)
{
void *res;
- Allctr_t *pref_allctr, *used_allctr;
+ Allctr_t *used_allctr;
UWord old_user_size;
Carrier_t *busy_pcrr_p;
-#ifdef ERTS_SMP
+ alcu_atag_t tag = 0;
int retried;
-#endif
- if (!p)
- return erts_alcu_alloc_thr_pref(type, extra, size);
-
- pref_allctr = get_pref_allctr(extra);
+ if (pref_allctr->atags) {
+ tag = determine_alloc_tag(pref_allctr, type);
+ }
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
retried = 0;
restart:
-#endif
used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
p, &old_user_size, &busy_pcrr_p);
@@ -5894,13 +6127,16 @@ restart:
0,
&busy_pcrr_p);
clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
-#ifdef ERTS_SMP
if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
retried = 1;
goto restart;
}
-#endif
+
+ if (pref_allctr->atags && res) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
+
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
}
@@ -5909,6 +6145,9 @@ restart:
if (!res)
goto unlock_ts_return;
else {
+ if (pref_allctr->atags) {
+ set_alloc_tag(pref_allctr, res, tag);
+ }
DEBUG_CHECK_ALIGNMENT(res);
@@ -5939,25 +6178,68 @@ restart:
}
}
+ DEBUG_CHECK_ALIGNMENT(res);
+
return res;
}
void *
erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
{
- return realloc_thr_pref(type, extra, p, size, 0);
+ if (p) {
+ Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+ return realloc_thr_pref(type, pref_allctr, p, size, 0);
+ }
+
+ return erts_alcu_alloc_thr_pref(type, extra, size);
}
void *
erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
void *p, Uint size)
{
- return realloc_thr_pref(type, extra, p, size, 1);
+ if (p) {
+ Allctr_t *pref_allctr = get_pref_allctr(extra);
+
+ return realloc_thr_pref(type, pref_allctr, p, size, 1);
+ }
+
+ return erts_alcu_alloc_thr_pref(type, extra, size);
}
-#endif
+
+static Uint adjust_sbct(Allctr_t* allctr, Uint sbct)
+{
+#ifndef ARCH_64
+ if (sbct > 0) {
+ Uint max_mbc_block_sz = UNIT_CEILING(sbct - 1 + ABLK_HDR_SZ);
+ if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
+ || max_mbc_block_sz < sbct) { /* wrap around */
+ /*
+ * By limiting sbc_threshold to (hard limit - min_block_size)
+ * we avoid having to split off free "residue blocks"
+ * smaller than min_block_size.
+ */
+ max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
+ sbct = max_mbc_block_sz - ABLK_HDR_SZ + 1;
+ }
+ }
#endif
+ return sbct;
+}
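To make the wrap-around guard concrete, here is a self-contained 32-bit model with made-up constants (8-byte units, a 24-bit block-size field, hypothetical header and minimum block sizes; the real macros live in erl_alloc_util.h):

    #include <assert.h>
    #include <stdint.h>

    #define UNIT       8u
    #define CEILING(X) (((X) + UNIT - 1) & ~(UNIT - 1))
    #define FLOOR(X)   ((X) & ~(UNIT - 1))
    #define SZ_MASK    0xFFFFFFu /* hypothetical 24-bit MBC block size field */
    #define HDR_SZ     8u
    #define MIN_BLK    32u

    static uint32_t model_adjust_sbct(uint32_t sbct) /* assumes sbct > 0 */
    {
        uint32_t max_blk = CEILING(sbct - 1 + HDR_SZ);

        if (max_blk + FLOOR(MIN_BLK - 1) > SZ_MASK || max_blk < sbct) {
            /* Clamp so a residue free block of at least MIN_BLK still fits. */
            max_blk = SZ_MASK - FLOOR(MIN_BLK - 1);
            sbct = max_blk - HDR_SZ + 1;
        }
        return sbct;
    }

    int main(void)
    {
        assert(model_adjust_sbct(4096) == 4096);      /* small: unchanged */
        assert(model_adjust_sbct(SZ_MASK) < SZ_MASK); /* near mask: clamped */
        return 0;
    }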
+
+int erts_alcu_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value)
+{
+ const Uint MIN_DYN_SBCT = 4000; /* a lame catastrophe prevention */
+
+ if (param == am_sbct && value >= MIN_DYN_SBCT) {
+ allctr->sbc_threshold = adjust_sbct(allctr, value);
+ return 1;
+ }
+ return 0;
+}
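Callers are expected to go through the allctr->try_set_dyn_param hook that erts_alcu_start() installs further down, rather than calling this function directly. A hedged usage sketch (my_allctr is hypothetical):

    /* Returns 1 if the allocator accepted the new single-block carrier
     * threshold, 0 if the parameter is unknown or below MIN_DYN_SBCT. */
    static int model_set_sbct(Allctr_t *my_allctr, Uint bytes)
    {
        return my_allctr->try_set_dyn_param(my_allctr, am_sbct, bytes);
    }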
/* ------------------------------------------------------------------------- */
@@ -5978,10 +6260,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
sys_memcpy((void *) &allctr->mseg_opt,
(void *) &erts_mseg_default_opt,
sizeof(ErtsMsegOpt_t));
-#ifdef ERTS_SMP
if (init->tspec || init->tpref)
allctr->mseg_opt.sched_spec = 1;
-#endif /* ERTS_SMP */
#endif /* HAVE_ERTS_MSEG */
allctr->name_prefix = init->name_prefix;
@@ -6009,6 +6289,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->t = 0;
allctr->ramv = init->ramv;
+ allctr->atags = init->atags;
allctr->main_carrier_size = init->mmbcs;
#if HAVE_ERTS_MSEG
@@ -6039,7 +6320,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(FreeBlkFtr_t));
-#if ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
sz += (init->fix ?
@@ -6049,10 +6329,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->min_block_size = sz;
}
- allctr->cpool.pooled_list.next = &allctr->cpool.pooled_list;
- allctr->cpool.pooled_list.prev = &allctr->cpool.pooled_list;
- allctr->cpool.traitor_list.next = &allctr->cpool.traitor_list;
- allctr->cpool.traitor_list.prev = &allctr->cpool.traitor_list;
+ allctr->cpool.pooled_tree = NULL;
allctr->cpool.dc_list.first = NULL;
allctr->cpool.dc_list.last = NULL;
allctr->cpool.abandon_limit = 0;
@@ -6062,51 +6339,34 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
erts_atomic_init_nob(&allctr->cpool.stat.carriers_size, 0);
erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
- allctr->cpool.util_limit = init->ts ? 0 : init->acul;
-#endif
-
- allctr->sbc_threshold = init->sbct;
-#ifndef ARCH_64
- if (allctr->sbc_threshold > 0) {
- Uint max_mbc_block_sz = UNIT_CEILING(allctr->sbc_threshold - 1 + ABLK_HDR_SZ);
- if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
- || max_mbc_block_sz < allctr->sbc_threshold) { /* wrap around */
- /*
- * By limiting sbc_threshold to (hard limit - min_block_size)
- * we avoid having to split off free "residue blocks"
- * smaller than min_block_size.
- */
- max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
- allctr->sbc_threshold = max_mbc_block_sz - ABLK_HDR_SZ + 1;
- }
+ if (!init->ts && init->acul && init->acnl) {
+ allctr->cpool.util_limit = init->acul;
+ allctr->cpool.in_pool_limit = init->acnl;
+ allctr->cpool.fblk_min_limit = init->acfml;
}
-#endif
+ else {
+ allctr->cpool.util_limit = 0;
+ allctr->cpool.in_pool_limit = 0;
+ allctr->cpool.fblk_min_limit = 0;
+ }
+
+ allctr->sbc_threshold = adjust_sbct(allctr, init->sbct);
#if HAVE_ERTS_MSEG
if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
#endif
-#ifdef USE_THREADS
if (init->ts) {
allctr->thread_safe = 1;
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC,1);
-#else
- erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),1);
-#endif /*ERTS_ENABLE_LOCK_COUNT*/
-
+
+ erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+
#ifdef DEBUG
allctr->debug.saved_tid = 0;
#endif
}
-#endif
if(!allctr->get_free_block
|| !allctr->link_free_block
@@ -6119,14 +6379,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->mbc_header_size < sizeof(Carrier_t))
goto error;
-#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
allctr->dd.ix = init->ix;
}
-#endif
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ ABLK_HDR_SZ)
- ABLK_HDR_SZ);
@@ -6143,6 +6401,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->sys_realloc = &erts_alcu_sys_realloc;
allctr->sys_dealloc = &erts_alcu_sys_dealloc;
}
+
+ allctr->try_set_dyn_param = &erts_alcu_try_set_dyn_param;
+
#if HAVE_ERTS_MSEG
if (init->mseg_alloc) {
ASSERT(init->mseg_realloc && init->mseg_dealloc);
@@ -6157,6 +6418,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mseg_realloc = &erts_alcu_mseg_realloc;
allctr->mseg_dealloc = &erts_alcu_mseg_dealloc;
}
+
/* If a custom carrier alloc function is specified, make sure it's used */
if (init->mseg_alloc && !init->sys_alloc) {
allctr->crr_set_flgs = CFLG_FORCE_MSEG;
@@ -6180,10 +6442,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
| CFLG_NO_CPOOL
| CFLG_MAIN_CARRIER);
if (!blk) {
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
erts_exit(ERTS_ABORT_EXIT,
"Failed to create main carrier for %salloc\n",
init->name_prefix);
@@ -6203,9 +6463,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->fix[i].type_size = init->fix_type_size[i];
allctr->fix[i].list_size = 0;
allctr->fix[i].list = NULL;
-#ifdef ERTS_SMP
ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));
-#endif
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
allctr->fix[i].u.cpool.min_list_size = 0;
allctr->fix[i].u.cpool.shrink_list = 0;
@@ -6225,10 +6483,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
error:
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
return 0;
@@ -6246,10 +6502,8 @@ erts_alcu_stop(Allctr_t *allctr)
while (allctr->mbc_list.first)
destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
}
@@ -6258,14 +6512,12 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
-#ifdef ERTS_SMP
int i;
for (i = 0; i <= ERTS_ALC_A_MAX; i++) {
ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel;
erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
}
-#endif
ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
@@ -6280,12 +6532,1079 @@ erts_alcu_init(AlcUInit_t *init)
carrier_alignment = sizeof(Unit_t);
#endif
- erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
atoms_initialized = 0;
initialized = 1;
}
+/* ------------------------------------------------------------------------- */
+
+/* Allocation histograms and carrier information are gathered by walking through
+ * all carriers associated with each allocator instance. This is done as
+ * aux_yield_work on the scheduler that owns each instance.
+ *
+ * Yielding is implemented by temporarily inserting a "dummy carrier" at the
+ * last position. It's permanently "busy" so it won't get picked up by someone
+ * else when in the carrier pool, and we never make the employer aware of it
+ * through callbacks so we can't accidentally allocate on it.
+ *
+ * Plain malloc/free is used to guarantee we won't allocate with the allocator
+ * we're scanning. */
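The dummy-carrier trick generalizes to any intrusive list walk that must yield: pause by splicing a marker node in at the current position, resume by unlinking it and skipping markers parked by concurrent scans. A standalone doubly-linked sketch (plain C, not the ERTS carrier types):

    #include <stddef.h>

    struct node { struct node *next, *prev; int is_marker; };

    /* Pause: remember the position by splicing `marker` in after `pos`. */
    static void scan_pause(struct node *pos, struct node *marker)
    {
        marker->is_marker = 1;
        marker->next = pos->next;
        marker->prev = pos;
        if (pos->next)
            pos->next->prev = marker;
        pos->next = marker;
    }

    /* Resume: unlink our marker and continue behind it, skipping markers
     * that other (concurrent) scans may have left in the list. */
    static struct node *scan_resume(struct node *marker)
    {
        struct node *cursor = marker->next;

        marker->prev->next = marker->next;
        if (marker->next)
            marker->next->prev = marker->prev;

        while (cursor && cursor->is_marker)
            cursor = cursor->next;
        return cursor;
    }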
+
+/* Yield between carriers once this many blocks have been processed. Note that
+ * a single carrier scan may exceed this figure. */
+#ifndef DEBUG
+ #define BLOCKSCAN_REDUCTIONS (8000)
+#else
+ #define BLOCKSCAN_REDUCTIONS (400)
+#endif
+
+/* Abort a single carrier scan after this many blocks to prevent really large
+ * MBCs from blocking forever. */
+#define BLOCKSCAN_BAILOUT_THRESHOLD (16000)
+
+typedef struct alcu_blockscan {
+ /* A per-scheduler list used when multiple scans have been queued. The
+ * current scanner will always run until completion/abort before moving on
+ * to the next. */
+ struct alcu_blockscan *scanner_queue;
+
+ Allctr_t *allocator;
+ Process *process;
+
+ int (*current_op)(struct alcu_blockscan *scanner);
+ int (*next_op)(struct alcu_blockscan *scanner);
+ int reductions;
+
+ ErtsAlcCPoolData_t *cpool_cursor;
+ CarrierList_t *current_clist;
+ Carrier_t *clist_cursor;
+ Carrier_t dummy_carrier;
+
+ /* Called if the process that started this job dies before we're done. */
+ void (*abort)(void *user_data);
+
+ /* Called on each carrier. The callback must return the number of blocks
+ * scanned to yield properly between carriers.
+ *
+ * Note that it's not possible to "yield back" into a carrier. */
+ int (*scan)(Allctr_t *, void *user_data, Carrier_t *);
+
+ /* Called when all carriers have been scanned. The callback may return
+ * non-zero to yield. */
+ int (*finish)(void *user_data);
+
+ void *user_data;
+} blockscan_t;
+
+static Carrier_t *blockscan_restore_clist_cursor(blockscan_t *state)
+{
+ Carrier_t *cursor = state->clist_cursor;
+
+ ASSERT(state->clist_cursor == (state->current_clist)->first ||
+ state->clist_cursor == &state->dummy_carrier);
+
+ if (cursor == &state->dummy_carrier) {
+ cursor = cursor->next;
+
+ unlink_carrier(state->current_clist, state->clist_cursor);
+ }
+
+ return cursor;
+}
+
+static void blockscan_save_clist_cursor(blockscan_t *state, Carrier_t *after)
+{
+ ASSERT(state->clist_cursor == (state->current_clist)->first ||
+ state->clist_cursor == &state->dummy_carrier);
+
+ state->clist_cursor = &state->dummy_carrier;
+
+ (state->clist_cursor)->next = after->next;
+ (state->clist_cursor)->prev = after;
+
+ relink_carrier(state->current_clist, state->clist_cursor);
+}
+
+static int blockscan_clist_yielding(blockscan_t *state)
+{
+ Carrier_t *cursor = blockscan_restore_clist_cursor(state);
+
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ return 0;
+ }
+
+ while (cursor) {
+ /* Skip dummy carriers inserted by another (concurrent) block scan.
+ * This can happen when scanning thread-safe allocators from multiple
+ * schedulers. */
+ if (CARRIER_SZ(cursor) > 0) {
+ int blocks_scanned = state->scan(state->allocator,
+ state->user_data,
+ cursor);
+
+ state->reductions -= blocks_scanned;
+
+ if (state->reductions <= 0) {
+ blockscan_save_clist_cursor(state, cursor);
+ return 1;
+ }
+ }
+
+ cursor = cursor->next;
+ }
+
+ return 0;
+}
+
+static ErtsAlcCPoolData_t *blockscan_restore_cpool_cursor(blockscan_t *state)
+{
+ ErtsAlcCPoolData_t *cursor;
+
+ cursor = cpool_aint2cpd(cpool_read(&(state->cpool_cursor)->next));
+
+ if (state->cpool_cursor == &state->dummy_carrier.cpool) {
+ cpool_delete(state->allocator, state->allocator, &state->dummy_carrier);
+ }
+
+ return cursor;
+}
+
+static void blockscan_save_cpool_cursor(blockscan_t *state,
+ ErtsAlcCPoolData_t *after)
+{
+ ErtsAlcCPoolData_t *dummy_carrier, *prev_carrier, *next_carrier;
+
+ dummy_carrier = &state->dummy_carrier.cpool;
+
+ next_carrier = cpool_aint2cpd(cpool_mod_mark(&after->next));
+ prev_carrier = cpool_aint2cpd(cpool_mod_mark(&next_carrier->prev));
+
+ cpool_init(&dummy_carrier->next, (erts_aint_t)next_carrier);
+ cpool_init(&dummy_carrier->prev, (erts_aint_t)prev_carrier);
+
+ cpool_set_mod_marked(&prev_carrier->next,
+ (erts_aint_t)dummy_carrier,
+ (erts_aint_t)next_carrier);
+ cpool_set_mod_marked(&next_carrier->prev,
+ (erts_aint_t)dummy_carrier,
+ (erts_aint_t)prev_carrier);
+
+ state->cpool_cursor = dummy_carrier;
+}
+
+static int blockscan_cpool_yielding(blockscan_t *state)
+{
+ ErtsAlcCPoolData_t *sentinel, *cursor;
+
+ sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
+ cursor = blockscan_restore_cpool_cursor(state);
+
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ return 0;
+ }
+
+ while (cursor != sentinel) {
+ Carrier_t *carrier;
+ erts_aint_t exp;
+
+ /* When a deallocation happens on a pooled carrier it will be routed to
+ * its owner, so the only way to be sure that it isn't modified while
+ * scanning is to skip all carriers that aren't ours. The deallocations
+ * deferred to us will get handled when we're done. */
+ while (cursor->orig_allctr != state->allocator) {
+ cursor = cpool_aint2cpd(cpool_read(&cursor->next));
+
+ if (cursor == sentinel) {
+ return 0;
+ }
+ }
+
+ carrier = ErtsContainerStruct(cursor, Carrier_t, cpool);
+ exp = erts_atomic_read_rb(&carrier->allctr);
+
+ if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) {
+ ASSERT(state->allocator == (Allctr_t*)(exp & ~ERTS_CRR_ALCTR_FLG_MASK));
+ ASSERT(!(exp & ERTS_CRR_ALCTR_FLG_BUSY));
+
+ if (erts_atomic_cmpxchg_acqb(&carrier->allctr,
+ exp | ERTS_CRR_ALCTR_FLG_BUSY,
+ exp) == exp) {
+ /* Skip dummy carriers inserted by another (concurrent) block
+ * scan. This can happen when scanning thread-safe allocators
+ * from multiple schedulers. */
+ if (CARRIER_SZ(carrier) > 0) {
+ int blocks_scanned = state->scan(state->allocator,
+ state->user_data,
+ carrier);
+
+ state->reductions -= blocks_scanned;
+
+ if (state->reductions <= 0) {
+ blockscan_save_cpool_cursor(state, cursor);
+ erts_atomic_set_relb(&carrier->allctr, exp);
+
+ return 1;
+ }
+ }
+
+ erts_atomic_set_relb(&carrier->allctr, exp);
+ }
+ }
+
+ cursor = cpool_aint2cpd(cpool_read(&cursor->next));
+ }
+
+ return 0;
+}
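This sweep takes the other side of the protocol sketched after cpool_fetch() above: the scanner briefly raises BUSY with an acquire CAS so concurrent fetchers skip the carrier instead of blocking on it, scans, then restores the previous word with a release store. A self-contained model under the same hypothetical flag layout:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MFLG_IN_POOL ((uintptr_t)1 << 0)
    #define MFLG_BUSY    ((uintptr_t)1 << 1)

    typedef struct { _Atomic uintptr_t allctr; } mcarrier_t;

    /* Scan one pooled carrier under the BUSY bit; returns 1 if scanned. */
    static int model_scan_one(mcarrier_t *crr, void (*scan)(mcarrier_t *))
    {
        uintptr_t exp = atomic_load_explicit(&crr->allctr, memory_order_acquire);

        if ((exp & (MFLG_IN_POOL | MFLG_BUSY)) != MFLG_IN_POOL)
            return 0; /* not pooled, or someone else holds it */
        if (!atomic_compare_exchange_strong(&crr->allctr, &exp,
                                            exp | MFLG_BUSY))
            return 0; /* lost the race; move on to the next carrier */

        scan(crr); /* safe: fetchers and other scanners skip BUSY */
        atomic_store_explicit(&crr->allctr, exp, memory_order_release);
        return 1;
    }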
+
+static int blockscan_yield_helper(blockscan_t *state,
+ int (*yielding_op)(blockscan_t*))
+{
+ /* Note that we don't check whether to abort here; only yielding_op knows
+ * whether the carrier is still in the list/pool. */
+
+ if ((state->allocator)->thread_safe) {
+ /* Locked scans have to be as short as possible. */
+ state->reductions = 1;
+
+ erts_mtx_lock(&(state->allocator)->mutex);
+ } else {
+ state->reductions = BLOCKSCAN_REDUCTIONS;
+ }
+
+ if (yielding_op(state)) {
+ state->next_op = state->current_op;
+ }
+
+ if ((state->allocator)->thread_safe) {
+ erts_mtx_unlock(&(state->allocator)->mutex);
+ }
+
+ return 1;
+}
+
+/* */
+
+static int blockscan_finish(blockscan_t *state)
+{
+ if (ERTS_PROC_IS_EXITING(state->process)) {
+ state->abort(state->user_data);
+ return 0;
+ }
+
+ state->current_op = blockscan_finish;
+
+ return state->finish(state->user_data);
+}
+
+static int blockscan_sweep_sbcs(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_sbcs) {
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_SBC, state->allocator);
+ state->current_clist = &(state->allocator)->sbc_list;
+ state->clist_cursor = (state->current_clist)->first;
+ }
+
+ state->current_op = blockscan_sweep_sbcs;
+ state->next_op = blockscan_finish;
+
+ return blockscan_yield_helper(state, blockscan_clist_yielding);
+}
+
+static int blockscan_sweep_mbcs(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_mbcs) {
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
+ state->current_clist = &(state->allocator)->mbc_list;
+ state->clist_cursor = (state->current_clist)->first;
+ }
+
+ state->current_op = blockscan_sweep_mbcs;
+ state->next_op = blockscan_sweep_sbcs;
+
+ return blockscan_yield_helper(state, blockscan_clist_yielding);
+}
+
+static int blockscan_sweep_cpool(blockscan_t *state)
+{
+ if (state->current_op != blockscan_sweep_cpool) {
+ ErtsAlcCPoolData_t *sentinel;
+
+ SET_CARRIER_HDR(&state->dummy_carrier, 0, SCH_MBC, state->allocator);
+ sentinel = &carrier_pool[(state->allocator)->alloc_no].sentinel;
+ state->cpool_cursor = sentinel;
+ }
+
+ state->current_op = blockscan_sweep_cpool;
+ state->next_op = blockscan_sweep_mbcs;
+
+ return blockscan_yield_helper(state, blockscan_cpool_yielding);
+}
+
+static int blockscan_get_specific_allocator(int allocator_num,
+ int sched_id,
+ Allctr_t **out)
+{
+ ErtsAllocatorInfo_t *ai;
+ Allctr_t *allocator;
+
+ ASSERT(allocator_num >= ERTS_ALC_A_MIN &&
+ allocator_num <= ERTS_ALC_A_MAX);
+ ASSERT(sched_id >= 0 && sched_id <= erts_no_schedulers);
+
+ ai = &erts_allctrs_info[allocator_num];
+
+ if (!ai->enabled || !ai->alloc_util) {
+ return 0;
+ }
+
+ if (!ai->thr_spec) {
+ if (sched_id != 0) {
+ /* Only thread-specific allocators can be scanned on a specific
+ * scheduler. */
+ return 0;
+ }
+
+ allocator = (Allctr_t*)ai->extra;
+ ASSERT(allocator->thread_safe);
+ } else {
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t*)ai->extra;
+
+ ASSERT(sched_id < tspec->size);
+
+ allocator = tspec->allctr[sched_id];
+ }
+
+ *out = allocator;
+
+ return 1;
+}
+
+static void blockscan_sched_trampoline(void *arg)
+{
+ ErtsAlcuBlockscanYieldData *yield;
+ ErtsSchedulerData *esdp;
+ blockscan_t *scanner;
+
+ esdp = erts_get_scheduler_data();
+ scanner = (blockscan_t*)arg;
+
+ yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
+
+ ASSERT((yield->last == NULL) == (yield->current == NULL));
+
+ if (yield->last != NULL) {
+ blockscan_t *prev_scanner = yield->last;
+
+ ASSERT(prev_scanner->scanner_queue == NULL);
+
+ prev_scanner->scanner_queue = scanner;
+ } else {
+ yield->current = scanner;
+ }
+
+ scanner->scanner_queue = NULL;
+ yield->last = scanner;
+
+ erts_notify_new_aux_yield_work(esdp);
+}
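+
+/* Scanners aimed at the same scheduler are chained in FIFO order through
+ * scanner_queue; erts_handle_yielded_alcu_blockscan() below pops the head
+ * once its scan has reached blockscan_finish (or the owner has exited). */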
+
+static void blockscan_dispatch(blockscan_t *scanner, Process *owner,
+ Allctr_t *allocator, int sched_id)
+{
+ ASSERT(erts_get_scheduler_id() != 0);
+
+ if (sched_id == 0) {
+ /* Global instances are always handled on the current scheduler. */
+ sched_id = ERTS_ALC_GET_THR_IX();
+ ASSERT(allocator->thread_safe);
+ }
+
+ scanner->allocator = allocator;
+ scanner->process = owner;
+
+ erts_proc_inc_refc(scanner->process);
+
+ cpool_init_carrier_data(scanner->allocator, &scanner->dummy_carrier);
+ erts_atomic_init_nob(&(scanner->dummy_carrier).allctr,
+ (erts_aint_t)allocator | ERTS_CRR_ALCTR_FLG_BUSY);
+
+ if (ERTS_ALC_IS_CPOOL_ENABLED(scanner->allocator)) {
+ scanner->next_op = blockscan_sweep_cpool;
+ } else {
+ scanner->next_op = blockscan_sweep_mbcs;
+ }
+
+ /* Aux yield jobs can only be set up while running on the scheduler that
+ * services them, so we move there before continuing.
+ *
+ * We can't drive the scan itself through this since the scheduler will
+ * always finish *all* misc aux work in one go, which makes it impossible to
+ * yield. */
+ erts_schedule_misc_aux_work(sched_id, blockscan_sched_trampoline, scanner);
+}
+
+int erts_handle_yielded_alcu_blockscan(ErtsSchedulerData *esdp,
+ ErtsAlcuBlockscanYieldData *yield)
+{
+ blockscan_t *scanner = yield->current;
+
+ (void)esdp;
+
+ ASSERT((yield->last == NULL) == (yield->current == NULL));
+
+ if (scanner) {
+ if (scanner->next_op(scanner)) {
+ return 1;
+ }
+
+ ASSERT(ERTS_PROC_IS_EXITING(scanner->process) ||
+ scanner->current_op == blockscan_finish);
+
+ yield->current = scanner->scanner_queue;
+
+ if (yield->current == NULL) {
+ ASSERT(scanner == yield->last);
+ yield->last = NULL;
+ }
+
+ erts_proc_dec_refc(scanner->process);
+
+ /* Plain free is intentional. */
+ free(scanner);
+
+ return yield->current != NULL;
+ }
+
+ return 0;
+}
+
+void erts_alcu_sched_spec_data_init(ErtsSchedulerData *esdp)
+{
+ ErtsAlcuBlockscanYieldData *yield;
+
+ yield = ERTS_SCHED_AUX_YIELD_DATA(esdp, alcu_blockscan);
+
+ yield->current = NULL;
+ yield->last = NULL;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static ERTS_INLINE int u64_log2(Uint64 v)
+{
+ static const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
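+
+/* A few spot checks (an editorial sketch, not part of the patch) of the
+ * de Bruijn multiply above, which computes floor(log2(v)):
+ *
+ *   u64_log2(1)          == 0
+ *   u64_log2(2)          == 1
+ *   u64_log2(1024)       == 10
+ *   u64_log2(~(Uint64)0) == 63
+ */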
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct hist_tree__ {
+ struct hist_tree__ *parent;
+ struct hist_tree__ *left;
+ struct hist_tree__ *right;
+
+ int is_red;
+
+ alcu_atag_t tag;
+ UWord histogram[1];
+} hist_tree_t;
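+
+/* The trailing histogram[1] is a pre-C99 flexible array member; nodes are
+ * over-allocated to hist_slot_count entries with
+ * calloc(1, sizeof(hist_tree_t) + (hist_slot_count - 1) * sizeof(UWord))
+ * in gather_ahist_update() below. */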
+
+#define ERTS_RBT_PREFIX hist_tree
+#define ERTS_RBT_T hist_tree_t
+#define ERTS_RBT_KEY_T UWord
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) ((void)0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->tag)
+#define ERTS_RBT_IS_LT(KX, KY) (KX < KY)
+#define ERTS_RBT_IS_EQ(KX, KY) (KX == KY)
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+typedef struct {
+ blockscan_t common;
+
+ ErtsIRefStorage iref;
+ Process *process;
+
+ hist_tree_rbt_yield_state_t hist_tree_yield;
+ hist_tree_t *hist_tree;
+ UWord hist_count;
+
+ UWord hist_slot_start;
+ int hist_slot_count;
+
+ UWord unscanned_size;
+
+ ErtsHeapFactory msg_factory;
+ int building_result;
+ Eterm result_list;
+} gather_ahist_t;
+
+static void gather_ahist_update(gather_ahist_t *state, UWord tag, UWord size)
+{
+ hist_tree_t *hist_node;
+ UWord size_interval;
+ int hist_slot;
+
+ hist_node = hist_tree_rbt_lookup(state->hist_tree, tag);
+
+ if (hist_node == NULL) {
+ /* Plain calloc is intentional. */
+ hist_node = (hist_tree_t*)calloc(1, sizeof(hist_tree_t) +
+ (state->hist_slot_count - 1) *
+ sizeof(hist_node->histogram[0]));
+ hist_node->tag = tag;
+
+ hist_tree_rbt_insert(&state->hist_tree, hist_node);
+ state->hist_count++;
+ }
+
+ size_interval = (size / state->hist_slot_start);
+ size_interval = u64_log2(size_interval + 1);
+
+ hist_slot = MIN(size_interval, state->hist_slot_count - 1);
+
+ hist_node->histogram[hist_slot]++;
+}
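+
+/* Worked example of the slot mapping (hypothetical parameters): with
+ * hist_slot_start = 128 and hist_slot_count = 8, a 1500-byte block gives
+ * u64_log2(1500 / 128 + 1) = u64_log2(12) = 3, so slot N covers sizes in
+ * [start * (2^N - 1), start * (2^(N+1) - 1)), with the last slot absorbing
+ * everything larger. */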
+
+static int gather_ahist_scan(Allctr_t *allocator,
+ void *user_data,
+ Carrier_t *carrier)
+{
+ gather_ahist_t *state;
+ int blocks_scanned;
+ Block_t *block;
+
+ state = (gather_ahist_t*)user_data;
+ blocks_scanned = 1;
+
+ if (IS_SB_CARRIER(carrier)) {
+ alcu_atag_t tag;
+
+ block = SBC2BLK(allocator, carrier);
+ tag = GET_BLK_ATAG(block);
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+
+ gather_ahist_update(state, tag, SBC_BLK_SZ(block));
+ } else {
+ UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
+
+ ASSERT(IS_MB_CARRIER(carrier));
+
+ block = MBC_TO_FIRST_BLK(allocator, carrier);
+
+ while (1) {
+ UWord block_size = MBC_BLK_SZ(block);
+
+ if (IS_ALLOCED_BLK(block)) {
+ alcu_atag_t tag = GET_BLK_ATAG(block);
+
+ ASSERT(DBG_IS_VALID_ATAG(allocator, tag));
+
+ gather_ahist_update(state, tag, block_size);
+ }
+
+ scanned_bytes += block_size;
+
+ if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
+ state->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
+ break;
+ } else if (IS_LAST_BLK(block)) {
+ break;
+ }
+
+ block = NXT_BLK(block);
+ blocks_scanned++;
+ }
+ }
+
+ return blocks_scanned;
+}
+
+static void gather_ahist_append_result(hist_tree_t *node, void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ Eterm histogram_tuple, tag_tuple;
+
+ Eterm *hp;
+ int ix;
+
+ ASSERT(state->building_result);
+
+ hp = erts_produce_heap(&state->msg_factory, 7 + state->hist_slot_count, 0);
+
+ hp[0] = make_arityval(state->hist_slot_count);
+
+ for (ix = 0; ix < state->hist_slot_count; ix++) {
+ hp[1 + ix] = make_small(node->histogram[ix]);
+ }
+
+ histogram_tuple = make_tuple(hp);
+ hp += 1 + state->hist_slot_count;
+
+ hp[0] = make_arityval(3);
+ hp[1] = ATAG_ID(node->tag);
+ hp[2] = alloc_type_atoms[ATAG_TYPE(node->tag)];
+ hp[3] = histogram_tuple;
+
+ tag_tuple = make_tuple(hp);
+ hp += 4;
+
+ state->result_list = CONS(hp, tag_tuple, state->result_list);
+
+ /* Plain free is intentional. */
+ free(node);
+}
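+
+/* Each result element costs exactly 7 + hist_slot_count heap words: the
+ * histogram tuple (1 + hist_slot_count), the {Tag, Type, Histogram}
+ * 3-tuple (4), and the list cell (2); this is what sizes the factory in
+ * gather_ahist_finish(). */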
+
+static void gather_ahist_send(gather_ahist_t *state)
+{
+ Eterm result_tuple, unscanned_size, task_ref;
+
+ Uint term_size;
+ Eterm *hp;
+
+ ASSERT((state->result_list == NIL) ^ (state->hist_count > 0));
+ ASSERT(state->building_result);
+
+ term_size = 4 + erts_iref_storage_heap_size(&state->iref);
+ term_size += IS_USMALL(0, state->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
+
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
+ &(state->msg_factory.message)->hfrag.off_heap, 0);
+
+ unscanned_size = bld_unstable_uint(&hp, NULL, state->unscanned_size);
+
+ hp[0] = make_arityval(3);
+ hp[1] = task_ref;
+ hp[2] = unscanned_size;
+ hp[3] = state->result_list;
+
+ result_tuple = make_tuple(hp);
+
+ erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
+
+ erts_queue_message(state->process, 0, state->msg_factory.message,
+ result_tuple, am_system);
+}
+
+static int gather_ahist_finish(void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ if (!state->building_result) {
+ ErtsMessage *message;
+ Uint minimum_size;
+ Eterm *hp;
+
+ /* {Ref, unscanned size, [{Tag, Type, {Histogram}} | Rest]} */
+ minimum_size = 4 + erts_iref_storage_heap_size(&state->iref) +
+ state->hist_count * (7 + state->hist_slot_count);
+
+ message = erts_alloc_message(minimum_size, &hp);
+ erts_factory_selfcontained_message_init(&state->msg_factory,
+ message, hp);
+
+ ERTS_RBT_YIELD_STAT_INIT(&state->hist_tree_yield);
+
+ state->result_list = NIL;
+ state->building_result = 1;
+ }
+
+ if (hist_tree_rbt_foreach_destroy_yielding(&state->hist_tree,
+ &gather_ahist_append_result,
+ state,
+ &state->hist_tree_yield,
+ BLOCKSCAN_REDUCTIONS)) {
+ return 1;
+ }
+
+ gather_ahist_send(state);
+
+ return 0;
+}
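+
+/* gather_ahist_finish() runs in up to two phases: the first entry sets up
+ * a self-contained message factory sized from hist_count, after which the
+ * tree is torn down with the yielding foreach so that result building also
+ * respects the reduction budget. */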
+
+static void gather_ahist_destroy_result(hist_tree_t *node, void *arg)
+{
+ (void)arg;
+ free(node);
+}
+
+static void gather_ahist_abort(void *arg)
+{
+ gather_ahist_t *state = (gather_ahist_t*)arg;
+
+ if (state->building_result) {
+ erts_factory_undo(&state->msg_factory);
+ }
+
+ hist_tree_rbt_foreach_destroy(&state->hist_tree,
+ &gather_ahist_destroy_result,
+ NULL);
+}
+
+int erts_alcu_gather_alloc_histograms(Process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref)
+{
+ gather_ahist_t *gather_state;
+ blockscan_t *scanner;
+ Allctr_t *allocator;
+
+ ASSERT(is_internal_ref(ref));
+
+ if (!blockscan_get_specific_allocator(allocator_num,
+ sched_id,
+ &allocator)) {
+ return 0;
+ } else if (!allocator->atags) {
+ return 0;
+ }
+
+ ensure_atoms_initialized(allocator);
+
+ /* Plain calloc is intentional. */
+ gather_state = (gather_ahist_t*)calloc(1, sizeof(gather_ahist_t));
+ scanner = &gather_state->common;
+
+ scanner->abort = gather_ahist_abort;
+ scanner->scan = gather_ahist_scan;
+ scanner->finish = gather_ahist_finish;
+ scanner->user_data = gather_state;
+
+ erts_iref_storage_save(&gather_state->iref, ref);
+ gather_state->hist_slot_start = hist_start;
+ gather_state->hist_slot_count = hist_width;
+ gather_state->process = p;
+
+ blockscan_dispatch(scanner, p, allocator, sched_id);
+
+ return 1;
+}
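+
+/* A hypothetical caller sketch (the real callers live outside this file):
+ * request a histogram with 8 slots starting at 128 bytes, then wait for
+ * the {Ref, UnscannedSize, Tags} message:
+ *
+ *   Eterm ref = erts_make_ref(c_p);
+ *   if (erts_alcu_gather_alloc_histograms(c_p, ERTS_ALC_A_BINARY,
+ *                                         0, 8, 128, ref)) {
+ *       (wait for the result message tagged with ref)
+ *   }
+ */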
+
+/* ------------------------------------------------------------------------- */
+
+typedef struct chist_node__ {
+ struct chist_node__ *next;
+
+ UWord carrier_size;
+ UWord unscanned_size;
+ UWord allocated_size;
+
+ /* BLOCKSCAN_BAILOUT_THRESHOLD guarantees we won't overflow this or the
+ * counters in the free block histogram. */
+ int allocated_count;
+ int flags;
+
+ int histogram[1];
+} chist_node_t;
+
+typedef struct {
+ blockscan_t common;
+
+ ErtsIRefStorage iref;
+ Process *process;
+
+ Eterm allocator_desc;
+
+ chist_node_t *info_list;
+ UWord info_count;
+
+ UWord hist_slot_start;
+ int hist_slot_count;
+
+ ErtsHeapFactory msg_factory;
+ int building_result;
+ Eterm result_list;
+} gather_cinfo_t;
+
+static int gather_cinfo_scan(Allctr_t *allocator,
+ void *user_data,
+ Carrier_t *carrier)
+{
+ gather_cinfo_t *state;
+ chist_node_t *node;
+ int blocks_scanned;
+ Block_t *block;
+
+ state = (gather_cinfo_t*)user_data;
+ node = calloc(1, sizeof(chist_node_t) +
+ (state->hist_slot_count - 1) *
+ sizeof(node->histogram[0]));
+ blocks_scanned = 1;
+
+ /* ERTS_CRR_ALCTR_FLG_BUSY is ignored since we've set it ourselves and it
+ * would be misleading to include it. */
+ node->flags = erts_atomic_read_rb(&carrier->allctr) &
+ (ERTS_CRR_ALCTR_FLG_MASK & ~ERTS_CRR_ALCTR_FLG_BUSY);
+ node->carrier_size = CARRIER_SZ(carrier);
+
+ if (IS_SB_CARRIER(carrier)) {
+ UWord block_size;
+
+ block = SBC2BLK(allocator, carrier);
+ block_size = SBC_BLK_SZ(block);
+
+ node->allocated_size = block_size;
+ node->allocated_count = 1;
+ } else {
+ UWord scanned_bytes = MBC_HEADER_SIZE(allocator);
+
+ block = MBC_TO_FIRST_BLK(allocator, carrier);
+
+ while (1) {
+ UWord block_size = MBC_BLK_SZ(block);
+
+ scanned_bytes += block_size;
+
+ if (IS_ALLOCED_BLK(block)) {
+ node->allocated_size += block_size;
+ node->allocated_count++;
+ } else {
+ UWord size_interval;
+ int hist_slot;
+
+ size_interval = (block_size / state->hist_slot_start);
+ size_interval = u64_log2(size_interval + 1);
+
+ hist_slot = MIN(size_interval, state->hist_slot_count - 1);
+
+ node->histogram[hist_slot]++;
+ }
+
+ if (blocks_scanned >= BLOCKSCAN_BAILOUT_THRESHOLD) {
+ node->unscanned_size += CARRIER_SZ(carrier) - scanned_bytes;
+ break;
+ } else if (IS_LAST_BLK(block)) {
+ break;
+ }
+
+ block = NXT_BLK(block);
+ blocks_scanned++;
+ }
+ }
+
+ node->next = state->info_list;
+ state->info_list = node;
+ state->info_count++;
+
+ return blocks_scanned;
+}
+
+static void gather_cinfo_append_result(gather_cinfo_t *state,
+ chist_node_t *info)
+{
+ Eterm carrier_size, unscanned_size, allocated_size;
+ Eterm histogram_tuple, carrier_tuple;
+
+ Uint term_size;
+ Eterm *hp;
+ int ix;
+
+ ASSERT(state->building_result);
+
+ term_size = 11 + state->hist_slot_count;
+ term_size += IS_USMALL(0, info->carrier_size) ? 0 : BIG_UINT_HEAP_SIZE;
+ term_size += IS_USMALL(0, info->unscanned_size) ? 0 : BIG_UINT_HEAP_SIZE;
+ term_size += IS_USMALL(0, info->allocated_size) ? 0 : BIG_UINT_HEAP_SIZE;
+
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ hp[0] = make_arityval(state->hist_slot_count);
+
+ for (ix = 0; ix < state->hist_slot_count; ix++) {
+ hp[1 + ix] = make_small(info->histogram[ix]);
+ }
+
+ histogram_tuple = make_tuple(hp);
+ hp += 1 + state->hist_slot_count;
+
+ carrier_size = bld_unstable_uint(&hp, NULL, info->carrier_size);
+ unscanned_size = bld_unstable_uint(&hp, NULL, info->unscanned_size);
+ allocated_size = bld_unstable_uint(&hp, NULL, info->allocated_size);
+
+ hp[0] = make_arityval(7);
+ hp[1] = state->allocator_desc;
+ hp[2] = carrier_size;
+ hp[3] = unscanned_size;
+ hp[4] = allocated_size;
+ hp[5] = make_small(info->allocated_count);
+ hp[6] = (info->flags & ERTS_CRR_ALCTR_FLG_IN_POOL) ? am_true : am_false;
+ hp[7] = histogram_tuple;
+
+ carrier_tuple = make_tuple(hp);
+ hp += 8;
+
+ state->result_list = CONS(hp, carrier_tuple, state->result_list);
+
+ free(info);
+}
+
+static void gather_cinfo_send(gather_cinfo_t *state)
+{
+ Eterm result_tuple, task_ref;
+
+ int term_size;
+ Eterm *hp;
+
+ ASSERT((state->result_list == NIL) ^ (state->info_count > 0));
+ ASSERT(state->building_result);
+
+ term_size = 3 + erts_iref_storage_heap_size(&state->iref);
+ hp = erts_produce_heap(&state->msg_factory, term_size, 0);
+
+ task_ref = erts_iref_storage_make_ref(&state->iref, &hp,
+ &(state->msg_factory.message)->hfrag.off_heap, 0);
+
+ hp[0] = make_arityval(2);
+ hp[1] = task_ref;
+ hp[2] = state->result_list;
+
+ result_tuple = make_tuple(hp);
+
+ erts_factory_trim_and_close(&state->msg_factory, &result_tuple, 1);
+
+ erts_queue_message(state->process, 0, state->msg_factory.message,
+ result_tuple, am_system);
+}
+
+static int gather_cinfo_finish(void *arg)
+{
+ gather_cinfo_t *state = (gather_cinfo_t*)arg;
+ int reductions = BLOCKSCAN_REDUCTIONS;
+
+ if (!state->building_result) {
+ ErtsMessage *message;
+ Uint minimum_size;
+ Eterm *hp;
+
+ /* {Ref, [{Allocator, carrier size, unscanned size, allocated size,
+ * allocated block count, in-pool flag, {Free block histogram}} | Rest]} */
+ minimum_size = 3 + erts_iref_storage_heap_size(&state->iref) +
+ state->info_count * (11 + state->hist_slot_count);
+
+ message = erts_alloc_message(minimum_size, &hp);
+ erts_factory_selfcontained_message_init(&state->msg_factory,
+ message, hp);
+
+ state->result_list = NIL;
+ state->building_result = 1;
+ }
+
+ while (state->info_list) {
+ chist_node_t *current = state->info_list;
+ state->info_list = current->next;
+
+ gather_cinfo_append_result(state, current);
+
+ if (reductions-- <= 0) {
+ return 1;
+ }
+ }
+
+ gather_cinfo_send(state);
+
+ return 0;
+}
+
+static void gather_cinfo_abort(void *arg)
+{
+ gather_cinfo_t *state = (gather_cinfo_t*)arg;
+
+ if (state->building_result) {
+ erts_factory_undo(&state->msg_factory);
+ }
+
+ while (state->info_list) {
+ chist_node_t *current = state->info_list;
+ state->info_list = current->next;
+
+ free(current);
+ }
+}
+
+int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref)
+{
+ gather_cinfo_t *gather_state;
+ blockscan_t *scanner;
+
+ const char *allocator_desc;
+ Allctr_t *allocator;
+
+ ASSERT(is_internal_ref(ref));
+
+ if (!blockscan_get_specific_allocator(allocator_num,
+ sched_id,
+ &allocator)) {
+ return 0;
+ }
+
+ allocator_desc = ERTS_ALC_A2AD(allocator_num);
+
+ /* Plain calloc is intentional. */
+ gather_state = (gather_cinfo_t*)calloc(1, sizeof(gather_cinfo_t));
+ scanner = &gather_state->common;
+
+ scanner->abort = gather_cinfo_abort;
+ scanner->scan = gather_cinfo_scan;
+ scanner->finish = gather_cinfo_finish;
+ scanner->user_data = gather_state;
+
+ gather_state->allocator_desc = erts_atom_put((byte *)allocator_desc,
+ sys_strlen(allocator_desc),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ erts_iref_storage_save(&gather_state->iref, ref);
+ gather_state->hist_slot_start = hist_start * 2;
+ gather_state->hist_slot_count = hist_width;
+ gather_state->process = p;
+
+ blockscan_dispatch(scanner, p, allocator, sched_id);
+
+ return 1;
+}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_alcu_test() is only supposed to be used for testing. *
@@ -6332,7 +7651,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1);
case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
-#ifdef ERTS_SMP
case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
case 0x020:
SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
@@ -6346,14 +7664,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
return (UWord) a2;
case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
-#else
- case 0x01f: return (UWord) 0;
- case 0x020: return (UWord) 0;
- case 0x021: return (UWord) 0;
- case 0x022: return (UWord) 0;
- case 0x023: return (UWord) 0;
- case 0x024: return (UWord) 0;
-#endif
case 0x025: /* UMEM2BLK_TEST*/
#ifdef DEBUG
# ifdef HARD_DEBUG
@@ -6411,15 +7721,12 @@ erts_alcu_verify_unused(Allctr_t *allctr)
void
erts_alcu_verify_unused_ts(Allctr_t *allctr)
{
-#ifdef USE_THREADS
erts_mtx_lock(&allctr->mutex);
-#endif
erts_alcu_verify_unused(allctr);
-#ifdef USE_THREADS
erts_mtx_unlock(&allctr->mutex);
-#endif
}
+
#ifdef DEBUG
int is_sbc_blk(Block_t* blk)
{
@@ -6440,11 +7747,6 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(SBC2BLK(allctr, sbc) == iblk);
ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
-#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % ERTS_SACRR_UNIT_SZ == 0);
- }
-#endif
crr = sbc;
cl = &allctr->sbc_list;
}
@@ -6553,3 +7855,45 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
+ if(!allocator->thread_safe) {
+ return;
+ }
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
+ "alcu_allocator", make_small(allocator->alloc_no),
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ } else {
+ erts_lcnt_uninstall(&allocator->mutex.lcnt);
+ }
+}
+
+static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
+ if(tspec->enabled) {
+ int i;
+
+ for(i = 0; i < tspec->size; i++) {
+ lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
+ }
+ }
+}
+
+void erts_lcnt_update_allocator_locks(int enable) {
+ int i;
+
+ for(i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
+
+ if(ai->enabled && ai->alloc_util) {
+ if(ai->thr_spec) {
+ lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
+ } else {
+ lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
+ }
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index f50f09907a..f26ace1534 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,10 +24,8 @@
#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
-#ifdef USE_THREADS
#define ERL_THREADS_EMU_INTERNAL__
#include "erl_threads.h"
-#endif
#include "erl_mseg.h"
#include "lttng-wrapper.h"
@@ -52,6 +50,7 @@ typedef struct {
int tspec;
int tpref;
int ramv;
+ int atags;
UWord sbct;
UWord asbcst;
UWord rsbcst;
@@ -63,7 +62,9 @@ typedef struct {
UWord lmbcs;
UWord smbcs;
UWord mbcgs;
- int acul;
+ UWord acul;
+ UWord acnl;
+ UWord acfml;
void *fix;
size_t *fix_type_size;
@@ -106,6 +107,7 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
+ 0, /* (bool) atags: tagged allocations */\
512*1024, /* (bytes) sbct: sbc threshold */\
 2*1024*1024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -118,6 +120,8 @@ typedef struct {
1024*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
0, /* (%) acul: abandon carrier utilization limit */\
+ 1000, /* (amount) acnl: abandoned carriers number limit */\
+ 0, /* (bytes) acfml: abandoned carrier fblk min limit */\
/* --- Data not options -------------------------------------------- */\
NULL, /* (ptr) fix */\
NULL /* (ptr) fix_type_size */\
@@ -140,6 +144,7 @@ typedef struct {
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
0, /* (bool) ramv: realloc always moves */\
+ 0, /* (bool) atags: tagged allocations */\
64*1024, /* (bytes) sbct: sbc threshold */\
 2*1024*1024, /* (amount) asbcst: abs sbc shrink threshold */\
20, /* (%) rsbcst: rel sbc shrink threshold */\
@@ -151,6 +156,8 @@ typedef struct {
128*1024, /* (bytes) smbcs: smallest mbc size */\
10, /* (amount) mbcgs: mbc growth stages */\
0, /* (%) acul: abandon carrier utilization limit */\
+ 1000, /* (amount) acnl: abandoned carriers number limit */\
+ 0, /* (bytes) acfml: abandoned carrier fblk min limit */\
/* --- Data not options -------------------------------------------- */\
NULL, /* (ptr) fix */\
NULL /* (ptr) fix_type_size */\
@@ -162,12 +169,10 @@ void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free(ErtsAlcType_t, void *, void *);
-#ifdef USE_THREADS
void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
-#ifdef ERTS_SMP
void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
@@ -176,18 +181,14 @@ void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
-#endif
-#endif
-Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
-Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_sz_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
-Eterm erts_alcu_info(Allctr_t *, int, int, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_au_info_options(fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_info_options(Allctr_t *, fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_sz_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
+Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
ErtsAlcUFixInfo_t *, int);
-#ifdef ERTS_SMP
void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);
-#endif
erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#ifdef ARCH_32
@@ -195,10 +196,6 @@ extern UWord erts_literal_vspace_map[];
# define ERTS_VSPACE_WORD_BITS (sizeof(UWord)*8)
#endif
-void* erts_alcu_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
-void* erts_alcu_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
-void erts_alcu_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
-
#if HAVE_ERTS_MSEG
# if defined(ARCH_32)
void* erts_alcu_literal_32_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
@@ -210,17 +207,56 @@ void* erts_alcu_mmapper_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
void* erts_alcu_mmapper_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
void erts_alcu_mmapper_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
# endif
+
+# if defined(ERTS_ALC_A_EXEC)
+void* erts_alcu_exec_mseg_alloc(Allctr_t*, Uint *size_p, Uint flags);
+void* erts_alcu_exec_mseg_realloc(Allctr_t*, void *seg, Uint old_size, Uint *new_size_p);
+void erts_alcu_exec_mseg_dealloc(Allctr_t*, void *seg, Uint size, Uint flags);
+# endif
#endif /* HAVE_ERTS_MSEG */
-void* erts_alcu_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
-void* erts_alcu_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint old_size, int superalign);
-void erts_alcu_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#ifdef ARCH_32
void* erts_alcu_literal_32_sys_alloc(Allctr_t*, Uint *size_p, int superalign);
void* erts_alcu_literal_32_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint old_size, int superalign);
void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_allocator_locks(int enable);
+#endif
+
+int erts_alcu_try_set_dyn_param(Allctr_t*, Eterm param, Uint value);
+
+/* Gathers per-tag allocation histograms from the given allocator number
+ * (ERTS_ALC_A_*) and scheduler id. An id of 0 means the global instance will
+ * be used.
+ *
+ * The results are sent to `p`, and it returns the number of messages to wait
+ * for. */
+int erts_alcu_gather_alloc_histograms(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref);
+
+/* Gathers per-carrier info from the given allocator number (ERTS_ALC_A_*) and
+ * scheduler id. An id of 0 means the global instance will be used.
+ *
+ * The results are sent to `p`, and it returns the number of messages to wait
+ * for. */
+int erts_alcu_gather_carrier_info(struct process *p, int allocator_num,
+ int sched_id, int hist_width,
+ UWord hist_start, Eterm ref);
+
+struct alcu_blockscan;
+
+typedef struct {
+ struct alcu_blockscan *current;
+ struct alcu_blockscan *last;
+} ErtsAlcuBlockscanYieldData;
+
+int erts_handle_yielded_alcu_blockscan(struct ErtsSchedulerData_ *esdp,
+ ErtsAlcuBlockscanYieldData *yield);
+void erts_alcu_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
#endif /* !ERL_ALLOC_UTIL__ */
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
@@ -235,10 +271,6 @@ void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int supe
# endif
#endif
-#undef MIN
-#undef MAX
-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#define FLOOR(X, I) (((X)/(I))*(I))
#define CEILING(X, I) ((((X) - 1)/(I) + 1)*(I))
@@ -305,45 +337,7 @@ void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int supe
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
-#ifdef ERTS_SMP
-
-typedef struct ErtsDoubleLink_t_ {
- struct ErtsDoubleLink_t_ *next;
- struct ErtsDoubleLink_t_ *prev;
-}ErtsDoubleLink_t;
-
-typedef struct {
- erts_atomic_t next;
- erts_atomic_t prev;
- Allctr_t *orig_allctr; /* read-only while carrier is alive */
- ErtsThrPrgrVal thr_prgr;
- erts_atomic_t max_size;
- UWord abandon_limit;
- UWord blocks;
- UWord blocks_size;
- ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
-} ErtsAlcCPoolData_t;
-
-#endif
-
typedef struct Carrier_t_ Carrier_t;
-struct Carrier_t_ {
- UWord chdr;
- Carrier_t *next;
- Carrier_t *prev;
- erts_smp_atomic_t allctr;
-#ifdef ERTS_SMP
- ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
-#endif
-};
-
-#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
- ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
-
-typedef struct {
- Carrier_t *first;
- Carrier_t *last;
-} CarrierList_t;
typedef struct {
UWord bhdr;
@@ -357,6 +351,22 @@ typedef struct {
#endif
} Block_t;
+typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
+
+union ErtsAllctrDDBlock_t_ {
+ erts_atomic_t atmc_next;
+ ErtsAllctrDDBlock_t *ptr_next;
+};
+
+typedef struct {
+ Block_t blk;
+#if !MBC_ABLK_OFFSET_BITS
+ ErtsAllctrDDBlock_t umem_;
+#endif
+} ErtsFakeDDBlock_t;
+
#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
@@ -365,14 +375,13 @@ typedef struct {
(THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
/*
- * FREE_LAST_MBC_BLK_HDR_FLGS is a special flag combo used for
- * distinguishing empty mbc's from allocated blocks in
- * handle_delayed_dealloc().
+ * HOMECOMING_MBC_BLK_HDR is a special block header combo used for
+ * distinguishing MBCs from allocated blocks in handle_delayed_dealloc().
*/
-#define FREE_LAST_MBC_BLK_HDR_FLGS (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+#define HOMECOMING_MBC_BLK_HDR (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
#define IS_FREE_LAST_MBC_BLK(B) \
- (((B)->bhdr & FLG_MASK) == FREE_LAST_MBC_BLK_HDR_FLGS)
+ (((B)->bhdr & FLG_MASK) == (THIS_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG))
#define IS_SBC_BLK(B) (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
#define IS_MBC_BLK(B) (!IS_SBC_BLK((B)))
@@ -396,6 +405,61 @@ typedef struct {
typedef UWord FreeBlkFtr_t; /* Footer of a free block */
+/* This AOFF stuff really belongs in erl_ao_firstfit_alloc.h */
+typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
+struct AOFF_RBTree_t_ {
+ Block_t hdr;
+ AOFF_RBTree_t *parent;
+ AOFF_RBTree_t *left;
+ AOFF_RBTree_t *right;
+ Uint32 flags;
+ Uint32 max_sz; /* of all blocks in this sub-tree */
+ union {
+ AOFF_RBTree_t* next; /* for best fit */
+ Sint64 birth_time; /* for age first fit */
+ } u;
+};
+
+void aoff_add_pooled_mbc(Allctr_t*, Carrier_t*);
+void aoff_remove_pooled_mbc(Allctr_t*, Carrier_t*);
+Carrier_t* aoff_lookup_pooled_mbc(Allctr_t*, Uint size);
+void erts_aoff_larger_max_size(AOFF_RBTree_t *node);
+
+typedef struct {
+ ErtsFakeDDBlock_t homecoming_dd;
+ erts_atomic_t next;
+ erts_atomic_t prev;
+ Allctr_t *orig_allctr; /* read-only while carrier is alive */
+ ErtsThrPrgrVal thr_prgr;
+ erts_atomic_t max_size;
+ UWord abandon_limit;
+ UWord blocks;
+ UWord blocks_size;
+ enum {
+ ERTS_MBC_IS_HOME,
+ ERTS_MBC_WAS_POOLED,
+ ERTS_MBC_WAS_TRAITOR
+ } state;
+ AOFF_RBTree_t pooled; /* node in pooled_tree */
+} ErtsAlcCPoolData_t;
+
+struct Carrier_t_ {
+ UWord chdr;
+ Carrier_t *next;
+ Carrier_t *prev;
+ erts_atomic_t allctr;
+ ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
+};
+
+#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
+ ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK))
+
+typedef struct {
+ Carrier_t *first;
+ Carrier_t *last;
+} CarrierList_t;
+
+
typedef Uint64 CallCounter_t;
typedef struct {
@@ -431,14 +495,6 @@ typedef struct {
} while (0)
#endif
-#ifdef ERTS_SMP
-
-typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
-
-union ErtsAllctrDDBlock_t_ {
- erts_atomic_t atmc_next;
- ErtsAllctrDDBlock_t *ptr_next;
-};
typedef struct {
ErtsAllctrDDBlock_t marker;
@@ -474,7 +530,6 @@ typedef struct {
} head;
} ErtsAllctrDDQueue_t;
-#endif
typedef struct {
size_t type_size;
@@ -497,7 +552,6 @@ typedef struct {
} ErtsAlcFixList_t;
struct Allctr_t_ {
-#ifdef ERTS_SMP
struct {
/*
* We want the queue at the beginning of
@@ -508,7 +562,6 @@ struct Allctr_t_ {
int use;
int ix;
} dd;
-#endif
/* Allocator name prefix */
char * name_prefix;
@@ -532,6 +585,7 @@ struct Allctr_t_ {
/* Options */
int t;
int ramv;
+ int atags;
Uint sbc_threshold;
Uint sbc_move_threshold;
Uint mbc_move_threshold;
@@ -554,29 +608,39 @@ struct Allctr_t_ {
UWord crr_set_flgs;
UWord crr_clr_flgs;
- /* Carriers */
+ /* Carriers *employed* by this allocator */
CarrierList_t mbc_list;
CarrierList_t sbc_list;
-#ifdef ERTS_SMP
struct {
- /* pooled_list, traitor list and dc_list contain only
- carriers _created_ by this allocator */
- ErtsDoubleLink_t pooled_list;
- ErtsDoubleLink_t traitor_list;
+ /* pooled_tree and dc_list contain only
+ carriers *created* by this allocator */
+ AOFF_RBTree_t* pooled_tree;
CarrierList_t dc_list;
UWord abandon_limit;
int disable_abandon;
int check_limit_count;
- int util_limit;
+ UWord util_limit; /* acul */
+ UWord in_pool_limit; /* acnl */
+ UWord fblk_min_limit; /* acfml */
struct {
erts_atomic_t blocks_size;
erts_atomic_t no_blocks;
erts_atomic_t carriers_size;
erts_atomic_t no_carriers;
+ CallCounter_t fail_pooled;
+ CallCounter_t fail_shared;
+ CallCounter_t fail_pend_dealloc;
+ CallCounter_t fail;
+ CallCounter_t fetch;
+ CallCounter_t skip_size;
+ CallCounter_t skip_busy;
+ CallCounter_t skip_not_pooled;
+ CallCounter_t skip_homecoming;
+ CallCounter_t skip_race;
+ CallCounter_t entrance_removed;
} stat;
} cpool;
-#endif
/* Main carrier (if there is one) */
Carrier_t * main_carrier;
@@ -586,7 +650,7 @@ struct Allctr_t_ {
Block_t *, Uint);
void (*link_free_block) (Allctr_t *, Block_t *);
void (*unlink_free_block) (Allctr_t *, Block_t *);
- Eterm (*info_options) (Allctr_t *, char *, int *,
+ Eterm (*info_options) (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
Uint (*get_next_mbc_size) (Allctr_t *);
@@ -608,6 +672,8 @@ struct Allctr_t_ {
void* (*sys_realloc)(Allctr_t *allctr, void *ptr, Uint *size_p, Uint old_size, int superalign);
void (*sys_dealloc)(Allctr_t *allctr, void *ptr, Uint size, int superalign);
+ int (*try_set_dyn_param)(Allctr_t*, Eterm param, Uint value);
+
void (*init_atoms) (void);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -619,7 +685,6 @@ struct Allctr_t_ {
int fix_shrink_scheduled;
ErtsAlcFixList_t *fix;
-#ifdef USE_THREADS
/* Mutex for this allocator */
erts_mtx_t mutex;
int thread_safe;
@@ -628,7 +693,6 @@ struct Allctr_t_ {
Allctr_t *next;
} ts_list;
-#endif
int atoms_initialized;
@@ -651,15 +715,14 @@ struct Allctr_t_ {
CarriersStats_t mbcs;
#ifdef DEBUG
-#ifdef USE_THREADS
struct {
int saved_tid;
erts_tid_t tid;
} debug;
#endif
-#endif
};
+
int erts_alcu_start(Allctr_t *, AllctrInit_t *);
void erts_alcu_stop(Allctr_t *);
@@ -674,7 +737,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 7e239d1f5d..3f0ab33597 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@
/*
- * Description: An "address order first fit" allocator
+ * Description: A family of "first fit" allocator strategies
* based on a Red-Black (binary search) Tree. The search,
* insert, and delete operations are all O(log n) operations
* on a Red-Black Tree.
@@ -40,6 +40,10 @@
* sorting order. Blocks within the same carrier are sorted
* wrt size instead of address. The 'max_sz' field is maintained
* in order to dismiss entire carriers with too small blocks.
+ * Age Order:
+ * Carriers are ordered by creation time instead of address.
+ * Oldest carrier with a large enough free block is chosen.
+ * No age order supported for blocks.
*
* Authors: Rickard Green/Sverker Eriksson
*/
@@ -53,10 +57,12 @@
#include "erl_ao_firstfit_alloc.h"
#ifdef DEBUG
+# define IS_DEBUG 1
#if 0
#define HARD_DEBUG
#endif
#else
+# define IS_DEBUG 0
#undef HARD_DEBUG
#endif
@@ -92,29 +98,10 @@
#define RBT_ASSERT(x)
#endif
-
-/* Types... */
-typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
-
-struct AOFF_RBTree_t_ {
- Block_t hdr;
- AOFF_RBTree_t *parent;
- AOFF_RBTree_t *left;
- AOFF_RBTree_t *right;
- Uint32 flags;
- Uint32 max_sz; /* of all blocks in this sub-tree */
-};
#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
-/* BF block nodes keeps list of all with equal size
- */
-typedef struct {
- AOFF_RBTree_t t;
- AOFF_RBTree_t *next;
-}AOFF_RBTreeList_t;
-
-#define LIST_NEXT(N) (((AOFF_RBTreeList_t*) (N))->next)
-#define LIST_PREV(N) (((AOFF_RBTreeList_t*) (N))->t.parent)
+#define LIST_NEXT(N) (((AOFF_RBTree_t*)(N))->u.next)
+#define LIST_PREV(N) (((AOFF_RBTree_t*)(N))->parent)
typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;
@@ -136,12 +123,12 @@ struct AOFF_Carrier_t_ {
*/
#ifdef HARD_DEBUG
-# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE)
-# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ) check_tree(CRR, FLV, ROOT, SZ)
-static AOFF_RBTree_t * check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint);
+# define HARD_CHECK_IS_MEMBER(ROOT,NODE) ASSERT(rbt_is_member(ROOT,NODE))
+# define HARD_CHECK_TREE(CRR,ORDER,ROOT,SZ) check_tree(CRR, ORDER, ROOT, SZ)
+static AOFF_RBTree_t * check_tree(Carrier_t*, enum AOFFSortOrder, AOFF_RBTree_t*, Uint);
#else
# define HARD_CHECK_IS_MEMBER(ROOT,NODE)
-# define HARD_CHECK_TREE(CRR,FLV,ROOT,SZ)
+# define HARD_CHECK_TREE(CRR,ORDER,ROOT,SZ)
#endif
@@ -179,25 +166,61 @@ static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
else ASSERT(new_max == old_max);
}
-static ERTS_INLINE SWord cmp_blocks(enum AOFF_Flavor flavor,
+/*
+ * Set possibly new larger 'max_sz' of node and propagate change toward root
+ */
+void erts_aoff_larger_max_size(AOFF_RBTree_t *node)
+{
+ AOFF_RBTree_t* x = node;
+ const Uint new_sz = node->hdr.bhdr;
+
+ ASSERT(!x->left || x->left->max_sz <= x->max_sz);
+ ASSERT(!x->right || x->right->max_sz <= x->max_sz);
+
+ while (new_sz > x->max_sz) {
+ x->max_sz = new_sz;
+ x = x->parent;
+ if (!x)
+ break;
+ }
+}
+
+/* Compare nodes for both carrier and block trees */
+static ERTS_INLINE SWord cmp_blocks(enum AOFFSortOrder order,
AOFF_RBTree_t* lhs, AOFF_RBTree_t* rhs)
{
ASSERT(lhs != rhs);
- ASSERT(flavor == AOFF_AOFF || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr));
- if (flavor != AOFF_AOFF) {
- SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs);
- if (diff || flavor == AOFF_BF) return diff;
+ if (order == FF_AGEFF) {
+ Sint64 diff = lhs->u.birth_time - rhs->u.birth_time;
+ #ifdef ARCH_64
+ if (diff)
+ return diff;
+ #else
+ if (diff < 0)
+ return -1;
+ else if (diff > 0)
+ return 1;
+ #endif
+ }
+ else {
+ ASSERT(order == FF_AOFF || FBLK_TO_MBC(&lhs->hdr) == FBLK_TO_MBC(&rhs->hdr));
+ if (order != FF_AOFF) {
+ SWord diff = (SWord)AOFF_BLK_SZ(lhs) - (SWord)AOFF_BLK_SZ(rhs);
+ if (diff || order == FF_BF) return diff;
+ }
}
return (char*)lhs - (char*)rhs;
}
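+
+/* On 32-bit targets the Sint64 age difference above cannot simply be
+ * narrowed to the SWord return type, since truncation could flip its sign;
+ * hence the explicit -1/0/+1 branches under #else. */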
-static ERTS_INLINE SWord cmp_cand_blk(enum AOFF_Flavor flavor,
+/* Compare candidate block. Only for block tree */
+static ERTS_INLINE SWord cmp_cand_blk(enum AOFFSortOrder order,
Block_t* cand_blk, AOFF_RBTree_t* rhs)
{
- if (flavor != AOFF_AOFF) {
+ ASSERT(order != FF_AGEFF);
+ if (order != FF_AOFF) {
if (BLK_TO_MBC(cand_blk) == FBLK_TO_MBC(&rhs->hdr)) {
SWord diff = (SWord)MBC_BLK_SZ(cand_blk) - (SWord)MBC_FBLK_SZ(&rhs->hdr);
- if (diff || flavor == AOFF_BF) return diff;
+ if (diff || order == FF_BF) return diff;
}
}
return (char*)cand_blk - (char*)rhs;
@@ -218,22 +241,26 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t*, Carrier_t*);
/* Generic tree functions used by both carrier and block trees. */
static void rbt_delete(AOFF_RBTree_t** root, AOFF_RBTree_t* del);
-static void rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk);
+static void rbt_insert(enum AOFFSortOrder, AOFF_RBTree_t** root, AOFF_RBTree_t* blk);
static AOFF_RBTree_t* rbt_search(AOFF_RBTree_t* root, Uint size);
-#ifdef HARD_DEBUG
-static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node);
-#endif
-static Eterm info_options(Allctr_t *, char *, int *, void *, Uint **, Uint *);
+static Eterm info_options(Allctr_t *, char *, fmtfn_t *, void *, Uint **, Uint *);
static void init_atoms(void);
static int atoms_initialized = 0;
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+static erts_atomic64_t birth_time_counter;
+#endif
+
void
erts_aoffalc_init(void)
{
atoms_initialized = 0;
+#ifndef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ erts_atomic64_init_nob(&birth_time_counter, 0);
+#endif
}
Allctr_t *
@@ -254,12 +281,15 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
- alc->flavor = aoffinit->flavor;
+ alc->blk_order = aoffinit->blk_order;
+ alc->crr_order = aoffinit->crr_order;
allctr->mbc_header_size = sizeof(AOFF_Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
- allctr->min_block_size = (aoffinit->flavor == AOFF_BF ?
- sizeof(AOFF_RBTreeList_t):sizeof(AOFF_RBTree_t));
+ allctr->min_block_size = (aoffinit->blk_order == FF_BF
+ ? (offsetof(AOFF_RBTree_t, u.next)
+ + ErtsSizeofMember(AOFF_RBTree_t, u.next))
+ : offsetof(AOFF_RBTree_t, u));
allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR;
@@ -487,9 +517,9 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) FBLK_TO_MBC(&del->hdr);
ASSERT(crr->rbt_node.hdr.bhdr == crr->root->max_sz);
- HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
- if (alc->flavor == AOFF_BF) {
+ if (alc->blk_order == FF_BF) {
ASSERT(del->flags & IS_BF_FLG);
if (IS_LIST_ELEM(del)) {
/* Remove from list */
@@ -510,14 +540,14 @@ aoff_unlink_free_block(Allctr_t *allctr, Block_t *blk)
replace(&crr->root, (AOFF_RBTree_t*)del, LIST_NEXT(del));
- HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
return;
}
}
rbt_delete(&crr->root, (AOFF_RBTree_t*)del);
- HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, 0);
+ HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, 0);
/* Update the carrier tree with a potentially new (lower) max_sz
*/
@@ -715,32 +745,33 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block)
ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(&blk_crr->crr));
ASSERT(blk_crr->rbt_node.hdr.bhdr == (blk_crr->root ? blk_crr->root->max_sz : 0));
- HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0);
+ HARD_CHECK_TREE(&blk_crr->crr, alc->blk_order, blk_crr->root, 0);
- rbt_insert(alc->flavor, &blk_crr->root, blk);
+ rbt_insert(alc->blk_order, &blk_crr->root, blk);
- /* Update the carrier tree with a potentially new (larger) max_sz
- */
+ /*
+ * Update carrier tree with a potentially new (larger) max_sz
+ */
crr_node = &blk_crr->rbt_node;
if (blk_sz > crr_node->hdr.bhdr) {
- ASSERT(blk_sz == blk_crr->root->max_sz);
- crr_node->hdr.bhdr = blk_sz;
- while (blk_sz > crr_node->max_sz) {
- crr_node->max_sz = blk_sz;
- crr_node = crr_node->parent;
- if (!crr_node) break;
- }
+ ASSERT(blk_sz == blk_crr->root->max_sz);
+ crr_node->hdr.bhdr = blk_sz;
+ while (blk_sz > crr_node->max_sz) {
+ crr_node->max_sz = blk_sz;
+ crr_node = crr_node->parent;
+ if (!crr_node) break;
+ }
}
- HARD_CHECK_TREE(&blk_crr->crr, alc->flavor, blk_crr->root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, alc->mbc_root, 0);
}
static void
-rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
+rbt_insert(enum AOFFSortOrder order, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
{
Uint blk_sz = AOFF_BLK_SZ(blk);
#ifdef DEBUG
- blk->flags = (flavor == AOFF_BF) ? IS_BF_FLG : 0;
+ blk->flags = (order == FF_BF) ? IS_BF_FLG : 0;
#else
blk->flags = 0;
#endif
@@ -760,7 +791,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
if (x->max_sz < blk_sz) {
x->max_sz = blk_sz;
}
- diff = cmp_blocks(flavor, blk, x);
+ diff = cmp_blocks(order, blk, x);
if (diff < 0) {
if (!x->left) {
blk->parent = x;
@@ -778,7 +809,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
x = x->right;
}
else {
- ASSERT(flavor == AOFF_BF);
+ ASSERT(order == FF_BF);
ASSERT(blk->flags & IS_BF_FLG);
ASSERT(x->flags & IS_BF_FLG);
SET_LIST_ELEM(blk);
@@ -798,7 +829,7 @@ rbt_insert(enum AOFF_Flavor flavor, AOFF_RBTree_t** root, AOFF_RBTree_t* blk)
if (IS_RED(blk->parent))
tree_insert_fixup(root, blk);
}
- if (flavor == AOFF_BF) {
+ if (order == FF_BF) {
SET_TREE_NODE(blk);
LIST_NEXT(blk) = NULL;
}
@@ -826,6 +857,16 @@ rbt_search(AOFF_RBTree_t* root, Uint size)
}
}
+Carrier_t* aoff_lookup_pooled_mbc(Allctr_t* allctr, Uint size)
+{
+ AOFF_RBTree_t* node;
+
+ if (!allctr->cpool.pooled_tree)
+ return NULL;
+ node = rbt_search(allctr->cpool.pooled_tree, size);
+ return node ? ErtsContainerStruct(node, Carrier_t, cpool.pooled) : NULL;
+}
+
static Block_t *
aoff_get_free_block(Allctr_t *allctr, Uint size,
Block_t *cand_blk, Uint cand_size)
@@ -850,7 +891,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
/* Get block within carrier tree
*/
#ifdef HARD_DEBUG
- dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->flavor, crr->root, size);
+ dbg_blk = HARD_CHECK_TREE(&crr->crr, alc->blk_order, crr->root, size);
#endif
blk = rbt_search(crr->root, size);
@@ -863,7 +904,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
if (!blk)
return NULL;
- if (cand_blk && cmp_cand_blk(alc->flavor, cand_blk, blk) < 0) {
+ if (cand_blk && cmp_cand_blk(alc->blk_order, cand_blk, blk) < 0) {
return NULL; /* cand_blk was better */
}
@@ -872,23 +913,35 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
return (Block_t *) blk;
}
+static ERTS_INLINE Sint64 get_birth_time(void)
+{
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ return (Sint64) erts_os_monotonic_time();
+#else
+ return (Sint64) erts_atomic64_inc_read_nob(&birth_time_counter);
+#endif
+}
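+
+/* Without OS monotonic time the birth time falls back to a global atomic
+ * counter; it still increases monotonically, which is all the
+ * age-first-fit carrier order needs. */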
+
static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)
{
AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
AOFF_RBTree_t **root = &alc->mbc_root;
- HARD_CHECK_TREE(NULL, 0, *root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
- /* Link carrier in address order tree
- */
crr->rbt_node.hdr.bhdr = 0;
- rbt_insert(AOFF_AOFF, root, &crr->rbt_node);
+ if (alc->crr_order == FF_AGEFF || IS_DEBUG) {
+ Sint64 bt = get_birth_time();
+ crr->rbt_node.u.birth_time = bt;
+ crr->crr.cpool.pooled.u.birth_time = bt;
+ }
+ rbt_insert(alc->crr_order, root, &crr->rbt_node);
/* aoff_link_free_block will add free block later */
crr->root = NULL;
- HARD_CHECK_TREE(NULL, 0, *root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
}
#define IS_CRR_IN_TREE(CRR,ROOT) \
@@ -911,27 +964,39 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
AOFF_RBTree_t **root = &alc->mbc_root;
ASSERT(!IS_CRR_IN_TREE(crr, *root));
- HARD_CHECK_TREE(NULL, 0, *root, 0);
+ HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
+
+ rbt_insert(alc->crr_order, root, &crr->rbt_node);
+
+ HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
+}
+
+void aoff_add_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_RBTree_t **root = &allctr->cpool.pooled_tree;
+
+ ASSERT(allctr == crr->cpool.orig_allctr);
+ HARD_CHECK_TREE(NULL, 0, *root, 0);
/* Link carrier in address order tree
*/
- rbt_insert(AOFF_AOFF, root, &crr->rbt_node);
+ rbt_insert(alc->crr_order, root, &crr->cpool.pooled);
HARD_CHECK_TREE(NULL, 0, *root, 0);
}
static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
{
- AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
- AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
- AOFF_RBTree_t **root = &alc->mbc_root;
+ AOFF_RBTree_t **root = &((AOFFAllctr_t*)allctr)->mbc_root;
+ AOFF_Carrier_t *crr = (AOFF_Carrier_t*)carrier;
ASSERT(allctr == ERTS_ALC_CARRIER_TO_ALLCTR(carrier));
if (!IS_CRR_IN_TREE(crr,*root))
- return;
+ return;
- HARD_CHECK_TREE(NULL, 0, *root, 0);
+ HARD_CHECK_TREE(NULL, ((AOFFAllctr_t*)allctr)->crr_order, *root, 0);
rbt_delete(root, &crr->rbt_node);
crr->rbt_node.parent = NULL;
@@ -939,9 +1004,27 @@ static void aoff_remove_mbc(Allctr_t *allctr, Carrier_t *carrier)
crr->rbt_node.right = NULL;
crr->rbt_node.max_sz = crr->rbt_node.hdr.bhdr;
- HARD_CHECK_TREE(NULL, 0, *root, 0);
+ HARD_CHECK_TREE(NULL, ((AOFFAllctr_t*)allctr)->crr_order, *root, 0);
+}
+
+void aoff_remove_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
+{
+ ASSERT(allctr == crr->cpool.orig_allctr);
+
+ HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0);
+
+ rbt_delete(&allctr->cpool.pooled_tree, &crr->cpool.pooled);
+#ifdef DEBUG
+ crr->cpool.pooled.parent = NULL;
+ crr->cpool.pooled.left = NULL;
+ crr->cpool.pooled.right = NULL;
+ crr->cpool.pooled.max_sz = 0;
+#endif
+ HARD_CHECK_TREE(NULL, 0, allctr->cpool.pooled_tree, 0);
}
+
static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier)
{
AOFF_Carrier_t *crr = (AOFF_Carrier_t*) carrier;
@@ -955,47 +1038,35 @@ static UWord aoff_largest_fblk_in_mbc(Allctr_t* allctr, Carrier_t* carrier)
* info_options()
*/
+static const char* flavor_str[2][3] = {
+ {"ageffcaoff", "ageffcaobf", "ageffcbf"},
+ { "aoff", "aoffcaobf", "aoffcbf"}
+};
+static Eterm flavor_atoms[2][3];
+
static struct {
Eterm as;
- Eterm aoff;
- Eterm aoffcaobf;
- Eterm aoffcbf;
-#ifdef DEBUG
- Eterm end_of_atoms;
-#endif
} am;
-static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+static void ERTS_INLINE atom_init(Eterm *atom, const char *name)
{
- *atom = am_atom_put(name, strlen(name));
+ *atom = am_atom_put(name, sys_strlen(name));
}
#define AM_INIT(AM) atom_init(&am.AM, #AM)
static void
init_atoms(void)
{
-#ifdef DEBUG
- Eterm *atom;
-#endif
+ int i, j;
if (atoms_initialized)
return;
-#ifdef DEBUG
- for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
- *atom = THE_NON_VALUE;
- }
-#endif
AM_INIT(as);
- AM_INIT(aoff);
- AM_INIT(aoffcaobf);
- AM_INIT(aoffcbf);
-#ifdef DEBUG
- for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
- ASSERT(*atom != THE_NON_VALUE);
- }
-#endif
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
+ atom_init(&flavor_atoms[i][j], flavor_str[i][j]);
atoms_initialized = 1;
}
@@ -1014,22 +1085,23 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
{
AOFFAllctr_t* alc = (AOFFAllctr_t*) allctr;
Eterm res = THE_NON_VALUE;
- const char* flavor_str[3] = {"aoff", "aoffcaobf", "aoffcbf"};
- Eterm flavor_atom[3] = {am.aoff, am.aoffcaobf, am.aoffcbf};
+
+ ASSERT(alc->crr_order >= 0 && alc->crr_order <= 1);
+ ASSERT(alc->blk_order >= 1 && alc->blk_order <= 3);
if (print_to_p) {
erts_print(*print_to_p,
print_to_arg,
"%sas: %s\n",
prefix,
- flavor_str[alc->flavor]);
+ flavor_str[alc->crr_order][alc->blk_order-1]);
}
if (hpp || szp) {
@@ -1039,7 +1111,8 @@ info_options(Allctr_t *allctr,
__FILE__, __LINE__);;
res = NIL;
- add_2tup(hpp, szp, &res, am.as, flavor_atom[alc->flavor]);
+ add_2tup(hpp, szp, &res, am.as,
+ flavor_atoms[alc->crr_order][alc->blk_order-1]);
}
return res;
@@ -1057,7 +1130,7 @@ UWord
erts_aoffalc_test(UWord op, UWord a1, UWord a2)
{
switch (op) {
- case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_AOBF;
+ case 0x500: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_AOBF;
case 0x501: {
AOFF_RBTree_t *node = ((AOFFAllctr_t *) a1)->mbc_root;
Uint size = (Uint) a2;
@@ -1072,7 +1145,7 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2)
case 0x507: return (UWord) IS_TREE_NODE((AOFF_RBTree_t *) a1);
case 0x508: return (UWord) 0; /* IS_BF_ALGO */
case 0x509: return (UWord) ((AOFF_RBTree_t *) a1)->max_sz;
- case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->flavor == AOFF_BF;
+ case 0x50a: return (UWord) ((AOFFAllctr_t *) a1)->blk_order == FF_BF;
case 0x50b: return (UWord) LIST_PREV(a1);
default: ASSERT(0); return ~((UWord) 0);
}
@@ -1085,12 +1158,13 @@ erts_aoffalc_test(UWord op, UWord a1, UWord a2)
#ifdef HARD_DEBUG
-
-static int rbt_assert_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
+static int rbt_is_member(AOFF_RBTree_t* root, AOFF_RBTree_t* node)
{
while (node != root) {
- ASSERT(node->parent);
- ASSERT(node->parent->left == node || node->parent->right == node);
+ if (!node->parent || (node->parent->left != node &&
+ node->parent->right != node)) {
+ return 0;
+ }
node = node->parent;
}
return 1;
@@ -1132,7 +1206,7 @@ static void print_tree(AOFF_RBTree_t*);
*/
static AOFF_RBTree_t *
-check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root, Uint size)
+check_tree(Carrier_t* within_crr, enum AOFFSortOrder order, AOFF_RBTree_t* root, Uint size)
{
AOFF_RBTree_t *res = NULL;
Sint blacks;
@@ -1144,7 +1218,8 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root,
#ifdef PRINT_TREE
print_tree(root);
#endif
- ASSERT(within_crr || flavor == AOFF_AOFF);
+ ASSERT((within_crr && order >= FF_AOFF) ||
+ (!within_crr && order <= FF_AOFF));
if (!root)
return res;
@@ -1202,7 +1277,7 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root,
ASSERT(((char*)x + AOFF_BLK_SZ(x)) <= ((char*)crr + CARRIER_SZ(crr)));
}
- if (flavor == AOFF_BF) {
+ if (order == FF_BF) {
AOFF_RBTree_t* y = x;
AOFF_RBTree_t* nxt = LIST_NEXT(y);
ASSERT(IS_TREE_NODE(x));
@@ -1225,13 +1300,13 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root,
if (x->left) {
ASSERT(x->left->parent == x);
- ASSERT(cmp_blocks(flavor, x->left, x) < 0);
+ ASSERT(cmp_blocks(order, x->left, x) < 0);
ASSERT(x->left->max_sz <= x->max_sz);
}
if (x->right) {
ASSERT(x->right->parent == x);
- ASSERT(cmp_blocks(flavor, x->right, x) > 0);
+ ASSERT(cmp_blocks(order, x->right, x) > 0);
ASSERT(x->right->max_sz <= x->max_sz);
}
ASSERT(x->max_sz >= AOFF_BLK_SZ(x));
@@ -1240,7 +1315,7 @@ check_tree(Carrier_t* within_crr, enum AOFF_Flavor flavor, AOFF_RBTree_t* root,
|| x->max_sz == (x->right ? x->right->max_sz : 0));
if (size && AOFF_BLK_SZ(x) >= size) {
- if (!res || cmp_blocks(flavor, x, res) < 0) {
+ if (!res || cmp_blocks(order, x, res) < 0) {
res = x;
}
}
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 7349c6ab19..68df9e0a49 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,14 +28,16 @@
typedef struct AOFFAllctr_t_ AOFFAllctr_t;
-enum AOFF_Flavor {
- AOFF_AOFF = 0,
- AOFF_AOBF = 1,
- AOFF_BF = 2
+enum AOFFSortOrder {
+ FF_AGEFF = 0, /* carrier trees only */
+ FF_AOFF = 1,
+ FF_AOBF = 2, /* block trees only */
+ FF_BF = 3 /* block trees only */
};
typedef struct {
- enum AOFF_Flavor flavor;
+ enum AOFFSortOrder blk_order;
+ enum AOFFSortOrder crr_order;
} AOFFAllctrInit_t;
#define ERTS_DEFAULT_AOFF_ALLCTR_INIT {0/*dummy*/}
@@ -53,12 +55,12 @@ Allctr_t *erts_aoffalc_start(AOFFAllctr_t *, AOFFAllctrInit_t*, AllctrInit_t *);
#define GET_ERL_ALLOC_UTIL_IMPL
#include "erl_alloc_util.h"
-
struct AOFFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
struct AOFF_RBTree_t_* mbc_root;
- enum AOFF_Flavor flavor;
+ enum AOFFSortOrder blk_order;
+ enum AOFFSortOrder crr_order;
};
UWord erts_aoffalc_test(UWord, UWord, UWord);
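The header now carries two independent sort orders instead of the single flavor: blk_order for the block tree and crr_order for the carrier tree, with FF_AGEFF legal only for carriers. A minimal standalone sketch of how a comparison routine can branch on the order; the Blk fields here are hypothetical stand-ins, not the real AOFF_RBTree_t layout:

    #include <stdio.h>

    enum AOFFSortOrder { FF_AGEFF = 0, FF_AOFF = 1, FF_AOBF = 2, FF_BF = 3 };

    typedef struct { unsigned long addr; unsigned long size; } Blk;

    /* <0 if a sorts before b, >0 otherwise */
    static long cmp_blocks_sketch(enum AOFFSortOrder order,
                                  const Blk *a, const Blk *b)
    {
        if (order == FF_AOBF || order == FF_BF) {
            long d = (long)a->size - (long)b->size;  /* smaller block first */
            if (d != 0 || order == FF_BF)
                return d;        /* FF_BF: equal sizes collect in a list */
        }
        return (long)a->addr - (long)b->addr;        /* address order */
    }

    int main(void)
    {
        Blk x = {0x1000, 64}, y = {0x2000, 32};
        /* positive: y (the smaller block) sorts first under FF_AOBF */
        printf("%ld\n", cmp_blocks_sketch(FF_AOBF, &x, &y));
        return 0;
    }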
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 861532f241..144fb56ea5 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -114,7 +114,7 @@ BIF_RETTYPE intdiv_2(BIF_ALIST_2)
}
if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
Sint ires = signed_val(BIF_ARG_1) / signed_val(BIF_ARG_2);
- if (MY_IS_SSMALL(ires))
+ if (IS_SSMALL(ires))
BIF_RET(make_small(ires));
}
BIF_RET(erts_int_div(BIF_P, BIF_ARG_1, BIF_ARG_2));
@@ -276,8 +276,12 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right)
goto do_bsl;
} else if (is_small(arg1) || is_big(arg1)) {
/*
- * N bsl PositiveBigNum is too large to represent.
+ * N bsl PositiveBigNum is too large to represent,
+ * unless N is 0.
*/
+ if (arg1 == make_small(0)) {
+ BIF_RET(arg1);
+ }
BIF_ERROR(p, SYSTEM_LIMIT);
}
/* Fall through if the left argument is not an integer. */
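The hunk above short-circuits 0 bsl PositiveBigNum to return the argument itself rather than raising system_limit, since shifting zero left by any amount is still zero. The cheap arg1 == make_small(0) test works because smalls are immediates: equal values have identical term words. A toy model of both points; the tag layout below is illustrative, not ERTS's real encoding:

    #include <assert.h>

    typedef unsigned long Eterm_;
    #define TAG_BITS  4
    #define TAG_SMALL 0xFUL
    #define make_small_(n) \
        ((Eterm_)(((unsigned long)(n) << TAG_BITS) | TAG_SMALL))

    int main(void)
    {
        /* 0 bsl AnyPositiveAmount is still 0, so the BIF may return the
         * argument unchanged instead of raising system_limit. */
        assert((0UL << 17) == 0UL);
        /* Smalls are immediates: equal values => equal term words. */
        assert(make_small_(0) == make_small_(0));
        return 0;
    }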
@@ -336,8 +340,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) + signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
hp = HAlloc(p, 2);
@@ -482,8 +485,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) - signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
hp = HAlloc(p, 2);
@@ -1177,8 +1179,7 @@ erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) + signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
if (ERTS_NEED_GC(p, 2)) {
@@ -1345,8 +1346,7 @@ erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
ires = signed_val(arg1) - signed_val(arg2);
- ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
- if (MY_IS_SSMALL(ires)) {
+ if (IS_SSMALL(ires)) {
return make_small(ires);
} else {
if (ERTS_NEED_GC(p, 2)) {
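All four arithmetic hunks make the same substitution: the MY_IS_SSMALL duplicate, and the assertion that it agreed with IS_SSMALL, are dropped in favor of IS_SSMALL itself. The fast path is to add the untagged smalls in a full-width word, keep the result if it still fits the small range, and otherwise allocate a bignum. A standalone sketch, assuming a 4-bit tag width:

    #include <stdio.h>
    #include <limits.h>

    #define SMALL_BITS   (sizeof(long)*CHAR_BIT - 4)   /* assumed tag width */
    #define MAX_SMALL_   ((1L << (SMALL_BITS - 1)) - 1)
    #define MIN_SMALL_   (-(1L << (SMALL_BITS - 1)))
    #define IS_SSMALL_(x) ((x) >= MIN_SMALL_ && (x) <= MAX_SMALL_)

    int main(void)
    {
        long a = MAX_SMALL_, b = 1;
        long sum = a + b;              /* well-defined: still fits in long */
        if (IS_SSMALL_(sum))
            printf("stay small: %ld\n", sum);
        else
            printf("promote to bignum\n"); /* slow path HAlloc's a big */
        return 0;
    }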
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 84254af0c2..44655ad5df 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -34,9 +34,6 @@
#define ERTS_ASYNC_PRINT_JOB 0
-#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q
-# error "Need async ready queue in non-smp case"
-#endif
typedef struct _erl_async {
DE_Handle* hndl; /* The DE_Handle is needed when port is gone */
@@ -46,16 +43,13 @@ typedef struct _erl_async {
ErlDrvPDL pdl;
void (*async_invoke)(void*);
void (*async_free)(void*);
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
union {
ErtsThrQPrepEnQ_t *prep_enq;
ErtsThrQFinDeQ_t fin_deq;
} q;
-#endif
} ErtsAsync;
-#if ERTS_USE_ASYNC_READY_Q
/*
* We can do without the enqueue mutex since it isn't needed for
@@ -94,7 +88,6 @@ typedef union {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))];
} ErtsAlgndAsyncReadyQ;
-#endif /* ERTS_USE_ASYNC_READY_Q */
typedef struct {
ErtsThrQ_t thr_q;
@@ -119,12 +112,10 @@ typedef struct {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))];
} init;
ErtsAlgndAsyncQ *queue;
-#if ERTS_USE_ASYNC_READY_Q
ErtsAlgndAsyncReadyQ *ready_queue;
-#endif
} ErtsAsyncData;
-#if defined(USE_THREADS) && defined(USE_VM_PROBES)
+#if defined(USE_VM_PROBES)
/*
* Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace
@@ -140,15 +131,6 @@ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
static ErtsAsyncData *async;
-#ifndef USE_THREADS
-
-void
-erts_init_async(void)
-{
-
-}
-
-#else
static void *async_main(void *);
@@ -158,7 +140,6 @@ async_q(int i)
return &async->queue[i].aq;
}
-#if ERTS_USE_ASYNC_READY_Q
static ERTS_INLINE ErtsAsyncReadyQ *
async_ready_q(Uint sched_id)
@@ -166,16 +147,13 @@ async_ready_q(Uint sched_id)
return &async->ready_queue[((int)sched_id)-1].arq;
}
-#endif
void
erts_init_async(void)
{
async = NULL;
if (erts_async_max_threads > 0) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
-#endif
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
char *ptr, thr_name[16];
size_t tot_size = 0;
@@ -183,9 +161,7 @@ erts_init_async(void)
tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
-#endif
ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
tot_size);
@@ -194,14 +170,14 @@ erts_init_async(void)
ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
async->init.data.no_initialized = 0;
- erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&async->init.data.cnd);
erts_atomic_init_nob(&async->init.data.id, 0);
async->queue = (ErtsAlgndAsyncQ *) ptr;
ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
@@ -213,14 +189,14 @@ erts_init_async(void)
for (i = 1; i <= erts_no_schedulers; i++) {
ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
- erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
qinit.arg = (void *) (SWord) i;
erts_thr_q_initialize(&arq->thr_q, &qinit);
}
-#endif
/* Create async threads... */
@@ -251,7 +227,6 @@ erts_init_async(void)
}
}
-#if ERTS_USE_ASYNC_READY_Q
void *
erts_get_async_ready_queue(Uint sched_id)
@@ -259,7 +234,6 @@ erts_get_async_ready_queue(Uint sched_id)
return (void *) async ? async_ready_q(sched_id) : NULL;
}
-#endif
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
@@ -268,10 +242,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
#endif
if (is_internal_port(a->port)) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
-#endif
/* make sure the driver will stay around */
if (a->hndl)
erts_ddll_reference_referenced_driver(a->hndl);
@@ -307,10 +279,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_t *tse,
ErtsThrQPrepEnQ_t **prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
int saved_fin_deq = 0;
ErtsThrQFinDeQ_t fin_deq;
-#endif
#ifdef USE_VM_PROBES
int len;
#endif
@@ -319,12 +289,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
if (a) {
-#if ERTS_USE_ASYNC_READY_Q
*prep_enq = a->q.prep_enq;
erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
if (saved_fin_deq)
erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
-#endif
#ifdef USE_LTTNG_VM_TRACEPOINTS
if (LTTNG_ENABLED(aio_pool_get)) {
lttng_decl_portbuf(port_str);
@@ -352,7 +320,6 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_reset(tse);
-#if ERTS_USE_ASYNC_READY_Q
chk_fin_deq:
if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
if (!saved_fin_deq) {
@@ -362,16 +329,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_thr_q_append_finalize_dequeue_data(&fin_deq,
&tmp_fin_deq);
}
-#endif
switch (erts_thr_q_inspect(q, 1)) {
case ERTS_THR_Q_DIRTY:
break;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
{
ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
- erts_thr_progress_wakeup(NULL, prgr);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(NULL), prgr);
/*
* We do no dequeue finalizing in the hope that a new async
* job will arrive before we are woken due to thread
@@ -380,17 +345,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_wait(tse);
break;
}
-#endif
case ERTS_THR_Q_CLEAN:
-#if ERTS_USE_ASYNC_READY_Q
if (saved_fin_deq) {
if (erts_thr_q_finalize_dequeue(&fin_deq))
goto chk_fin_deq;
else
saved_fin_deq = 0;
}
-#endif
erts_tse_wait(tse);
break;
@@ -406,15 +368,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
-#if ERTS_USE_ASYNC_READY_Q
Port *p = erts_id2port_sflgs(a->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#else
- Port *p = erts_thr_id2port_sflgs(a->port,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#endif
if (!p) {
if (a->async_free) {
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
@@ -430,11 +387,7 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
ERTS_MSACC_POP_STATE();
}
}
-#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
-#else
- erts_thr_port_release(p);
-#endif
}
if (a->pdl)
driver_pdl_dec_refc(a->pdl);
@@ -444,7 +397,6 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq;
#if ERTS_ASYNC_PRINT_JOB
@@ -463,12 +415,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
erts_mtx_unlock(&arq->x.data.enq_mtx);
#endif
-#else /* ERTS_USE_ASYNC_READY_Q */
-
- call_async_ready(a);
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
-
-#endif /* ERTS_USE_ASYNC_READY_Q */
}
@@ -484,7 +430,6 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
erts_tse_t *tse = erts_tse_fetch();
ERTS_DECLARE_DUMMY(Uint no);
-#ifdef ERTS_SMP
ErtsThrPrgrCallbacks callbacks;
callbacks.arg = (void *) tse;
@@ -493,15 +438,12 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
callbacks.wait = NULL;
erts_thr_progress_register_unmanaged_thread(&callbacks);
-#endif
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
qinit.arg = (void *) tse;
qinit.notify = async_wakeup;
-#if ERTS_USE_ASYNC_READY_Q
qinit.auto_finalize_dequeue = 0;
-#endif
erts_thr_q_initialize(&aq->thr_q, &qinit);
@@ -543,12 +485,10 @@ static void *async_main(void* arg)
return NULL;
}
-#endif /* USE_THREADS */
void
erts_exit_flush_async(void)
{
-#ifdef USE_THREADS
int i;
ErtsAsync a;
a.port = NIL;
@@ -562,11 +502,8 @@ erts_exit_flush_async(void)
async_add(&a, async_q(i));
for (i = 0; i < erts_async_max_threads; i++)
erts_thr_join(async->queue[i].aq.thr_id, NULL);
-#endif
}
-#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q
-
int erts_check_async_ready(void *varq)
{
ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
@@ -607,18 +544,15 @@ int erts_async_ready_clean(void *varq, void *val)
case ERTS_THR_Q_DIRTY:
return ERTS_ASYNC_READY_DIRTY;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
*((ErtsThrPrgrVal *) val)
= erts_thr_q_need_thr_progress(&arq->thr_q);
return ERTS_ASYNC_READY_NEED_THR_PRGR;
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
return ERTS_ASYNC_READY_CLEAN;
}
-#endif
/*
** Generate a fair async key from an ErlDrvPort
@@ -656,28 +590,22 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
Port* prt;
long id;
unsigned int qix;
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
ERTS_MSACC_PUSH_STATE();
sched_id = erts_get_scheduler_id();
if (!sched_id)
sched_id = 1;
-#else
- ERTS_MSACC_PUSH_STATE();
-#endif
prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));
-#if ERTS_USE_ASYNC_READY_Q
a->sched_id = sched_id;
-#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
a->port = prt->common.id;
a->pdl = NULL;
@@ -707,7 +635,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
(*key % erts_async_max_threads) : 0;
*key = qix;
}
-#ifdef USE_THREADS
if (erts_async_max_threads > 0) {
if (prt->port_data_lock) {
driver_pdl_inc_refc(prt->port_data_lock);
@@ -716,7 +643,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
async_add(a, async_q(qix));
return id;
}
-#endif
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_invoke)(a->async_data);
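Beyond deleting the non-threaded fallback, driver_async keeps its queue-selection contract: jobs carrying the same key are hashed to the same async thread's queue so their relative order is preserved, while keyless jobs are spread across queues. A simplified model of that policy (the real code also writes the chosen index back through the key pointer, and handles the counter differently):

    #include <stdio.h>

    static unsigned async_max_threads = 4;
    static unsigned rr_counter = 0;

    static unsigned pick_queue(unsigned *key /* may be NULL */)
    {
        if (key)
            return *key % async_max_threads;      /* stable per-key queue */
        return rr_counter++ % async_max_threads;  /* no key: spread load */
    }

    int main(void)
    {
        unsigned k = 42;
        /* keyed jobs land on the same queue twice */
        printf("keyed:   %u %u\n", pick_queue(&k), pick_queue(&k));
        printf("keyless: %u %u\n", pick_queue(NULL), pick_queue(NULL));
        return 0;
    }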
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
index 473c7686e5..70ef247e0a 100644
--- a/erts/emulator/beam/erl_async.h
+++ b/erts/emulator/beam/erl_async.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,41 +27,14 @@ extern int erts_async_max_threads;
#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
extern int erts_async_thread_suggested_stack_size;
-#ifdef USE_THREADS
-
-#ifdef ERTS_SMP
-/*
- * With smp support we can choose to have, or not to
- * have an async ready queue.
- */
-#define ERTS_USE_ASYNC_READY_Q 1
-#endif
-
-#ifndef ERTS_SMP
-/* In non-smp case we *need* the async ready queue */
-# undef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 1
-#endif
-
-#ifndef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 0
-#endif
-
-#if ERTS_USE_ASYNC_READY_Q
int erts_check_async_ready(void *);
int erts_async_ready_clean(void *, void *);
void *erts_get_async_ready_queue(Uint sched_id);
#define ERTS_ASYNC_READY_CLEAN 0
#define ERTS_ASYNC_READY_DIRTY 1
-#ifdef ERTS_SMP
#define ERTS_ASYNC_READY_NEED_THR_PRGR 2
-#endif
-#endif /* ERTS_USE_ASYNC_READY_Q */
-
-#endif /* USE_THREADS */
void erts_init_async(void);
void erts_exit_flush_async(void);
-
#endif /* ERL_ASYNC_H__ */
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 379cee39a1..9cb1199c2a 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -104,7 +104,7 @@ static void bf_link_free_block (Allctr_t *, Block_t *);
static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
static void init_atoms (void);
@@ -875,7 +875,7 @@ static struct {
static void ERTS_INLINE atom_init(Eterm *atom, char *name)
{
- *atom = am_atom_put(name, strlen(name));
+ *atom = am_atom_put(name, sys_strlen(name));
}
#define AM_INIT(AM) atom_init(&am.AM, #AM)
@@ -921,7 +921,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
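The info_options signature change swaps the raw int file-descriptor pointer for an fmtfn_t formatting callback, so output can be routed wherever the caller likes. A self-contained toy of the idiom; fmtfn_t's real signature differs, this only shows the shape:

    #include <stdio.h>
    #include <stdarg.h>

    typedef int (*fmtfn_sketch_t)(void *arg, const char *fmt, va_list ap);

    static int print_via(fmtfn_sketch_t fn, void *arg, const char *fmt, ...)
    {
        va_list ap; int r;
        va_start(ap, fmt);
        r = fn(arg, fmt, ap);   /* caller decides where the text goes */
        va_end(ap);
        return r;
    }

    static int to_stream(void *arg, const char *fmt, va_list ap)
    {
        return vfprintf((FILE *)arg, fmt, ap);
    }

    int main(void)
    {
        print_via(to_stream, stdout, "as: %s\n", "bf");
        return 0;
    }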
diff --git a/erts/emulator/beam/erl_bif_atomics.c b/erts/emulator/beam/erl_bif_atomics.c
new file mode 100644
index 0000000000..092dbb3bd3
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_atomics.c
@@ -0,0 +1,256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: High performance atomics.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+typedef struct
+{
+ int is_signed;
+ UWord vlen;
+ erts_atomic64_t v[1];
+}AtomicsRef;
+
+static int atomics_destructor(Binary *unused)
+{
+ return 1;
+}
+
+#define OPT_SIGNED (1 << 0)
+
+BIF_RETTYPE erts_internal_atomics_new_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ Binary* mbin;
+ UWord i, cnt, opts;
+ Uint bytes;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0
+ || !term_to_UWord(BIF_ARG_2, &opts)) {
+
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / sizeof(p->v[0])))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ bytes = offsetof(AtomicsRef, v) + cnt*sizeof(p->v[0]);
+ mbin = erts_create_magic_binary_x(bytes,
+ atomics_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->is_signed = opts & OPT_SIGNED;
+ p->vlen = cnt;
+ for (i=0; i < cnt; i++)
+ erts_atomic64_init_nob(&p->v[i], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, AtomicsRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != atomics_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_ix(Eterm ref, Eterm ix,
+ AtomicsRef** pp, UWord* ixp)
+{
+ return (get_ref(ref, pp)
+ && term_to_UWord(ix, ixp)
+ && --(*ixp) < (*pp)->vlen);
+}
+
+static ERTS_INLINE int get_value(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (p->is_signed ?
+ term_to_Sint64(term, (Sint64*)valp) :
+ term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE int get_incr(AtomicsRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_atomic(Process* proc, AtomicsRef* p,
+ erts_aint64_t val)
+{
+ if (p->is_signed) {
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+ }
+ else {
+ if ((Uint64)val <= MAX_SMALL)
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_UINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_uint64_to_big(val, &hp);
+ }
+ }
+}
+
+BIF_RETTYPE atomics_put_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t val;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_set_mb(&p->v[ix], val);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_get_2(BIF_ALIST_2)
+{
+ AtomicsRef* p;
+ UWord ix;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_read_mb(&p->v[ix]));
+}
+
+BIF_RETTYPE atomics_add_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_atomic64_add_mb(&p->v[ix], incr);
+ return am_ok;
+}
+
+BIF_RETTYPE atomics_add_get_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t incr;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return bld_atomic(BIF_P, p, erts_atomic64_add_read_mb(&p->v[ix], incr));
+}
+
+BIF_RETTYPE atomics_exchange_3(BIF_ALIST_3)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_xchg_mb(&p->v[ix], desired);
+ return bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_compare_exchange_4(BIF_ALIST_4)
+{
+ AtomicsRef* p;
+ UWord ix;
+ erts_aint64_t expected, desired, was;
+
+ if (!get_ref_ix(BIF_ARG_1, BIF_ARG_2, &p, &ix)
+ || !get_value(p, BIF_ARG_3, &expected)
+ || !get_value(p, BIF_ARG_4, &desired)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ was = erts_atomic64_cmpxchg_mb(&p->v[ix], desired, expected);
+ return was == expected ? am_ok : bld_atomic(BIF_P, p, was);
+}
+
+BIF_RETTYPE atomics_info_1(BIF_ALIST_1)
+{
+ AtomicsRef* p;
+ Uint hsz = MAP4_SZ;
+ Eterm *hp;
+ Uint64 max;
+ Sint64 min;
+ UWord memory;
+ Eterm max_val, min_val, sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ max = p->is_signed ? ERTS_SINT64_MAX : ERTS_UINT64_MAX;
+ min = p->is_signed ? ERTS_SINT64_MIN : 0;
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+
+ erts_bld_uint64(NULL, &hsz, max);
+ erts_bld_sint64(NULL, &hsz, min);
+ erts_bld_uword(NULL, &hsz, p->vlen);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ max_val = erts_bld_uint64(&hp, NULL, max);
+ min_val = erts_bld_sint64(&hp, NULL, min);
+ sz_val = erts_bld_uword(&hp, NULL, p->vlen);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP4(hp, am_max, max_val,
+ am_memory, mem_val,
+ am_min, min_val,
+ am_size, sz_val);
+}
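Two details of the new module are worth calling out. First, get_ref_ix bounds-checks the 1-based index with a single unsigned comparison: --(*ixp) turns index 1 into 0, and index 0 wraps to ERTS_UWORD_MAX, which then fails the < vlen test. Second, atomics_compare_exchange_4 returns ok on success and the observed value on failure, which is exactly what a retry loop needs. As a point of comparison only (this is plain C11, not ERTS code), the idiom it supports looks like:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic long counter;

    /* Saturating increment: add one but never exceed `limit`. */
    static long incr_capped(long limit)
    {
        long seen = atomic_load(&counter);
        while (seen < limit &&
               !atomic_compare_exchange_weak(&counter, &seen, seen + 1)) {
            /* `seen` was refreshed with the current value; retry. */
        }
        return seen;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            incr_capped(3);
        printf("%ld\n", atomic_load(&counter)); /* prints 3 */
        return 0;
    }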
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 6e10980b6b..a2610bf2e1 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -61,8 +61,6 @@ static Export binary_longest_prefix_trap_export;
static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3);
static Export binary_longest_suffix_trap_export;
static BIF_RETTYPE binary_longest_suffix_trap(BIF_ALIST_3);
-static Export binary_bin_to_list_trap_export;
-static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3);
static Export binary_copy_trap_export;
static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
static Uint max_loop_limit;
@@ -86,10 +84,6 @@ void erts_init_bif_binary(void)
am_erlang, am_binary_longest_suffix_trap, 3,
&binary_longest_suffix_trap);
- erts_init_trap_export(&binary_bin_to_list_trap_export,
- am_erlang, am_binary_bin_to_list_trap, 3,
- &binary_bin_to_list_trap);
-
erts_init_trap_export(&binary_copy_trap_export,
am_erlang, am_binary_copy_trap, 2,
&binary_copy_trap);
@@ -171,6 +165,16 @@ static void *my_alloc(MyAllocator *my, Uint size)
#define ALPHABET_SIZE 256
+typedef struct _findall_data {
+ Uint pos;
+ Uint len;
+#ifdef HARDDEBUG
+ Uint id;
+#endif
+ Eterm epos;
+ Eterm elen;
+} FindallData;
+
typedef struct _ac_node {
#ifdef HARDDEBUG
Uint32 id; /* To identify h pointer targets when
@@ -208,6 +212,103 @@ typedef struct _bm_data {
Sint badshift[ALPHABET_SIZE];
} BMData;
+typedef struct _ac_find_all_state {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} ACFindAllState;
+
+typedef struct _ac_find_first_state {
+ ACNode *q;
+ Uint pos;
+ Uint len;
+ ACNode *candidate;
+ Uint candidate_start;
+} ACFindFirstState;
+
+typedef struct _bm_find_all_state {
+ Sint pos;
+ Sint len;
+ Uint m;
+ Uint allocated;
+ FindallData *out;
+} BMFindAllState;
+
+typedef struct _bm_find_first_state {
+ Sint pos;
+ Sint len;
+} BMFindFirstState;
+
+typedef enum _bf_return {
+ BF_RESTART = -3,
+ BF_NOT_FOUND,
+ BF_BADARG,
+ BF_OK
+} BFReturn;
+
+typedef struct _binary_find_all_context {
+ ErtsHeapFactory factory;
+ Eterm term;
+ Sint head;
+ Sint tail;
+ Uint end_pos;
+ Uint size;
+ FindallData *data;
+ union {
+ ACFindAllState ac;
+ BMFindAllState bm;
+ } d;
+} BinaryFindAllContext;
+
+typedef struct _binary_find_first_context {
+ Uint pos;
+ Uint len;
+ union {
+ ACFindFirstState ac;
+ BMFindFirstState bm;
+ } d;
+} BinaryFindFirstContext;
+
+typedef struct _binary_find_context BinaryFindContext;
+
+typedef struct _binary_find_search {
+ void (*init) (BinaryFindContext *);
+ BFReturn (*find) (BinaryFindContext *, byte *);
+ void (*done) (BinaryFindContext *);
+} BinaryFindSearch;
+
+typedef Eterm (*BinaryFindResult)(Process *, Eterm, BinaryFindContext **);
+
+typedef enum _binary_find_state {
+ BFSearch,
+ BFResult,
+ BFDone
+} BinaryFindState;
+
+struct _binary_find_context {
+ Eterm pat_type;
+ Eterm pat_term;
+ Binary *pat_bin;
+ Uint flags;
+ Uint hsstart;
+ Uint hsend;
+ int loop_factor;
+ int exported;
+ Uint reds;
+ BinaryFindState state;
+ Eterm trap_term;
+ BinaryFindSearch *search;
+ BinaryFindResult not_found;
+ BinaryFindResult found;
+ union {
+ BinaryFindAllContext fa;
+ BinaryFindFirstContext ff;
+ } u;
+};
+
#ifdef HARDDEBUG
static void dump_bm_data(BMData *bm);
static void dump_ac_trie(ACTrie *act);
@@ -229,23 +330,16 @@ static void dump_ac_node(ACNode *node, int indent, int ch);
MYALIGN(sizeof(ACTrie))) /* Structure */
-#ifndef MAX
-#define MAX(A,B) (((A) > (B)) ? (A) : (B))
-#endif
-
-#ifndef MIN
-#define MIN(A,B) (((A) > (B)) ? (B) : (A))
-#endif
/*
* Callback for the magic binary
*/
-static void cleanup_my_data_ac(Binary *bp)
+static int cleanup_my_data_ac(Binary *bp)
{
- return;
+ return 1;
}
-static void cleanup_my_data_bm(Binary *bp)
+static int cleanup_my_data_bm(Binary *bp)
{
- return;
+ return 1;
}
/*
@@ -271,7 +365,7 @@ static ACTrie *create_acdata(MyAllocator *my, Uint len,
acn->d = 0;
acn->final = 0;
acn->h = NULL;
- memset(acn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
+ sys_memset(acn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
#ifdef HARDDEBUG
act->idc = 0;
acn->id = 0;
@@ -294,7 +388,7 @@ static BMData *create_bmdata(MyAllocator *my, byte *x, Uint len,
init_my_allocator(my, datasize, data);
bmd = my_alloc(my, sizeof(BMData));
bmd->x = my_alloc(my,len);
- memcpy(bmd->x,x,len);
+ sys_memcpy(bmd->x,x,len);
bmd->len = len;
bmd->goodshift = my_alloc(my,sizeof(Uint) * len);
*the_bin = mb;
@@ -339,7 +433,7 @@ static void ac_add_one_pattern(MyAllocator *my, ACTrie *act, byte *x, Uint len)
nn->d = i+1;
nn->h = act->root;
nn->final = 0;
- memset(nn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
+ sys_memset(nn->g, 0, sizeof(ACNode *) * ALPHABET_SIZE);
acn->g[x[i]] = nn;
++i;
acn = nn;
@@ -380,7 +474,7 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
qbuff[qt++] = child;
/* Search for correct failure function, follow the parent's
failure function until you find a similar transition
- funtion to this child's */
+ function to this child's */
r = parent->h;
while (r != NULL && r->g[i] == NULL) {
r = r->h;
@@ -421,32 +515,25 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff)
* Basic AC finds the first end before the first start...
*
*/
-typedef struct {
- ACNode *q;
- Uint pos;
- Uint len;
- ACNode *candidate;
- Uint candidate_start;
-} ACFindFirstState;
-
-
-static void ac_init_find_first_match(ACFindFirstState *state, ACTrie *act, Sint startpos, Uint len)
+static void ac_init_find_first_match(BinaryFindContext *ctx)
{
+ ACFindFirstState *state = &(ctx->u.ff.d.ac);
+ ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
state->q = act->root;
- state->pos = startpos;
- state->len = len;
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->candidate = NULL;
state->candidate_start = 0;
}
-#define AC_OK 0
-#define AC_NOT_FOUND -1
-#define AC_RESTART -2
#define AC_LOOP_FACTOR 10
-static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
- Uint *mpos, Uint *mlen, Uint *reductions)
+static BFReturn ac_find_first_match(BinaryFindContext *ctx, byte *haystack)
{
+ ACFindFirstState *state = &(ctx->u.ff.d.ac);
+ Uint *mpos = &(ctx->u.ff.pos);
+ Uint *mlen = &(ctx->u.ff.len);
+ Uint *reductions = &(ctx->reds);
ACNode *q = state->q;
Uint i = state->pos;
ACNode *candidate = state->candidate, *r;
@@ -462,7 +549,7 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
state->len = len;
state->candidate = candidate;
state->candidate_start = candidate_start;
- return AC_RESTART;
+ return BF_RESTART;
}
while (q->g[haystack[i]] == NULL && q->h != q) {
@@ -492,68 +579,33 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack,
}
*reductions = reds;
if (!candidate) {
- return AC_NOT_FOUND;
+ return BF_NOT_FOUND;
}
#ifdef HARDDEBUG
dump_ac_node(candidate,0,'?');
#endif
*mpos = candidate_start;
*mlen = candidate->d;
- return AC_OK;
+ return BF_OK;
}
-typedef struct _findall_data {
- Uint pos;
- Uint len;
-#ifdef HARDDEBUG
- Uint id;
-#endif
- Eterm epos;
- Eterm elen;
-} FindallData;
-
-typedef struct {
- ACNode *q;
- Uint pos;
- Uint len;
- Uint m;
- Uint allocated;
- FindallData *out;
-} ACFindAllState;
-
-static void ac_init_find_all(ACFindAllState *state, ACTrie *act, Sint startpos, Uint len)
+static void ac_init_find_all(BinaryFindContext *ctx)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
+ ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
state->q = act->root;
- state->pos = startpos;
- state->len = len;
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->m = 0;
state->allocated = 0;
state->out = NULL;
}
-static void ac_restore_find_all(ACFindAllState *state,
- const ACFindAllState *src)
-{
- memcpy(state, src, sizeof(ACFindAllState));
- if (state->allocated > 0) {
- state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * (state->allocated));
- memcpy(state->out, src+1, sizeof(FindallData)*state->m);
- } else {
- state->out = NULL;
- }
-}
-
-static void ac_serialize_find_all(const ACFindAllState *state,
- ACFindAllState *dst)
-{
- memcpy(dst, state, sizeof(ACFindAllState));
- memcpy(dst+1, state->out, sizeof(FindallData)*state->m);
-}
-
-static void ac_clean_find_all(ACFindAllState *state)
+static void ac_clean_find_all(BinaryFindContext *ctx)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
if (state->out != NULL) {
- erts_free(ERTS_ALC_T_TMP, state->out);
+ erts_free(ERTS_ALC_T_BINARY_FIND, state->out);
}
#ifdef HARDDEBUG
state->out = NULL;
@@ -565,9 +617,10 @@ static void ac_clean_find_all(ACFindAllState *state)
 * Differs from the find_first function in that it stores all matches, and the
 * values are returned only in the state.
*/
-static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
- Uint *reductions)
+static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack)
{
+ ACFindAllState *state = &(ctx->u.fa.d.ac);
+ Uint *reductions = &(ctx->reds);
ACNode *q = state->q;
Uint i = state->pos;
Uint rstart;
@@ -578,7 +631,6 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
FindallData *out = state->out;
register Uint reds = *reductions;
-
while (i < len) {
if (--reds == 0) {
state->q = q;
@@ -587,7 +639,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
state->m = m;
state->allocated = allocated;
state->out = out;
- return AC_RESTART;
+ return BF_RESTART;
}
while (q->g[haystack[i]] == NULL && q->h != q) {
q = q->h;
@@ -625,11 +677,11 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
if (m >= allocated) {
if (!allocated) {
allocated = 10;
- out = erts_alloc(ERTS_ALC_T_TMP,
+ out = erts_alloc(ERTS_ALC_T_BINARY_FIND,
sizeof(FindallData) * allocated);
} else {
allocated *= 2;
- out = erts_realloc(ERTS_ALC_T_TMP, out,
+ out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out,
sizeof(FindallData) *
allocated);
}
@@ -656,7 +708,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack,
*reductions = reds;
state->m = m;
state->out = out;
- return (m == 0) ? AC_NOT_FOUND : AC_OK;
+ return (m == 0) ? BF_NOT_FOUND : BF_OK;
}
/*
@@ -743,27 +795,22 @@ static void compute_goodshifts(BMData *bmd)
erts_free(ERTS_ALC_T_TMP, suffixes);
}
-typedef struct {
- Sint pos;
- Sint len;
-} BMFindFirstState;
-
-#define BM_OK 0 /* used only for find_all */
-#define BM_NOT_FOUND -1
-#define BM_RESTART -2
#define BM_LOOP_FACTOR 10 /* Should we have a higher value? */
-static void bm_init_find_first_match(BMFindFirstState *state, Sint startpos,
- Uint len)
+static void bm_init_find_first_match(BinaryFindContext *ctx)
{
- state->pos = startpos;
- state->len = (Sint) len;
+ BMFindFirstState *state = &(ctx->u.ff.d.bm);
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
}
-
-static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd,
- byte *haystack, Uint *reductions)
+static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack)
{
+ BMFindFirstState *state = &(ctx->u.ff.d.bm);
+ BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ Uint *mpos = &(ctx->u.ff.pos);
+ Uint *mlen = &(ctx->u.ff.len);
+ Uint *reductions = &(ctx->reds);
Sint blen = bmd->len;
Sint len = state->len;
Sint *gs = bmd->goodshift;
@@ -776,61 +823,37 @@ static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd,
while (j <= len - blen) {
if (--reds == 0) {
state->pos = j;
- return BM_RESTART;
+ return BF_RESTART;
}
for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
;
if (i < 0) { /* found */
*reductions = reds;
- return j;
+ *mpos = (Uint) j;
+ *mlen = (Uint) blen;
+ return BF_OK;
}
j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i);
}
*reductions = reds;
- return BM_NOT_FOUND;
+ return BF_NOT_FOUND;
}
-typedef struct {
- Sint pos;
- Sint len;
- Uint m;
- Uint allocated;
- FindallData *out;
-} BMFindAllState;
-
-static void bm_init_find_all(BMFindAllState *state, Sint startpos, Uint len)
+static void bm_init_find_all(BinaryFindContext *ctx)
{
- state->pos = startpos;
- state->len = (Sint) len;
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
+ state->pos = ctx->hsstart;
+ state->len = ctx->hsend;
state->m = 0;
state->allocated = 0;
state->out = NULL;
}
-static void bm_restore_find_all(BMFindAllState *state,
- const BMFindAllState *src)
-{
- memcpy(state, src, sizeof(BMFindAllState));
- if (state->allocated > 0) {
- state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) *
- (state->allocated));
- memcpy(state->out, src+1, sizeof(FindallData)*state->m);
- } else {
- state->out = NULL;
- }
-}
-
-static void bm_serialize_find_all(const BMFindAllState *state,
- BMFindAllState *dst)
-{
- memcpy(dst, state, sizeof(BMFindAllState));
- memcpy(dst+1, state->out, sizeof(FindallData)*state->m);
-}
-
-static void bm_clean_find_all(BMFindAllState *state)
+static void bm_clean_find_all(BinaryFindContext *ctx)
{
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
if (state->out != NULL) {
- erts_free(ERTS_ALC_T_TMP, state->out);
+ erts_free(ERTS_ALC_T_BINARY_FIND, state->out);
}
#ifdef HARDDEBUG
state->out = NULL;
@@ -842,10 +865,11 @@ static void bm_clean_find_all(BMFindAllState *state)
 * Differs from the find_first function in that it stores all matches and the
* values are returned only in the state.
*/
-static Sint bm_find_all_non_overlapping(BMFindAllState *state,
- BMData *bmd, byte *haystack,
- Uint *reductions)
+static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack)
{
+ BMFindAllState *state = &(ctx->u.fa.d.bm);
+ BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ Uint *reductions = &(ctx->reds);
Sint blen = bmd->len;
Sint len = state->len;
Sint *gs = bmd->goodshift;
@@ -864,7 +888,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
state->m = m;
state->allocated = allocated;
state->out = out;
- return BM_RESTART;
+ return BF_RESTART;
}
for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i)
;
@@ -872,10 +896,11 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
if (m >= allocated) {
if (!allocated) {
allocated = 10;
- out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * allocated);
+ out = erts_alloc(ERTS_ALC_T_BINARY_FIND,
+ sizeof(FindallData) * allocated);
} else {
allocated *= 2;
- out = erts_realloc(ERTS_ALC_T_TMP, out,
+ out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out,
sizeof(FindallData) * allocated);
}
}
@@ -890,7 +915,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state,
state->m = m;
state->out = out;
*reductions = reds;
- return (m == 0) ? BM_NOT_FOUND : BM_OK;
+ return (m == 0) ? BF_NOT_FOUND : BF_OK;
}
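For readers new to the algorithm: the shift j += MAX(gs[i], bs[haystack[i+j]] - blen + 1 + i) takes the better of the good-suffix and bad-character rules. A stripped-down Boyer-Moore-Horspool variant using only the bad-character table shows the core idea (standalone sketch, not the ERTS implementation):

    #include <stdio.h>
    #include <string.h>

    static long bmh_find(const unsigned char *hay, long hlen,
                         const unsigned char *needle, long nlen)
    {
        long shift[256], i, j;
        for (i = 0; i < 256; i++)
            shift[i] = nlen;                 /* miss: jump the whole needle */
        for (i = 0; i < nlen - 1; i++)
            shift[needle[i]] = nlen - 1 - i;

        for (j = 0; j + nlen <= hlen; j += shift[hay[j + nlen - 1]]) {
            for (i = nlen - 1; i >= 0 && needle[i] == hay[j + i]; i--)
                ;
            if (i < 0)
                return j;                    /* match at offset j */
        }
        return -1;
    }

    int main(void)
    {
        const char *h = "binary pattern matching", *n = "pattern";
        printf("%ld\n", bmh_find((const unsigned char *)h, (long)strlen(h),
                                 (const unsigned char *)n, (long)strlen(n)));
        return 0;
    }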
/*
@@ -1010,57 +1035,166 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1)
if (do_binary_match_compile(BIF_ARG_1,&tag,&bin)) {
BIF_ERROR(BIF_P,BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE+3);
- ret = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE+3);
+ ret = erts_mk_magic_ref(&hp, &MSO(BIF_P), bin);
ret = TUPLE2(hp, tag, ret);
BIF_RET(ret);
}
-#define DO_BIN_MATCH_OK 0
-#define DO_BIN_MATCH_BADARG -1
-#define DO_BIN_MATCH_RESTART -2
+#define BF_FLAG_GLOBAL 0x01
+#define BF_FLAG_SPLIT_TRIM 0x02
+#define BF_FLAG_SPLIT_TRIM_ALL 0x04
-#define BINARY_FIND_ALL 0x01
-#define BINARY_SPLIT_TRIM 0x02
-#define BINARY_SPLIT_TRIM_ALL 0x04
+static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found,
+ BinaryFindResult single, BinaryFindResult global,
+ Binary *pat_bin);
+static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src);
+static int bf_context_destructor(Binary *ctx_bin);
+#ifdef HARDDEBUG
+static void bf_context_dump(BinaryFindContext *ctx);
+#endif
-typedef struct BinaryFindState {
- Eterm type;
- Uint flags;
- Uint hsstart;
- Uint hsend;
- Eterm (*not_found_result) (Process *, Eterm, struct BinaryFindState *);
- Eterm (*single_result) (Process *, Eterm, struct BinaryFindState *, Sint, Sint);
- Eterm (*global_result) (Process *, Eterm, struct BinaryFindState *, FindallData *, Uint);
-} BinaryFindState;
+static BinaryFindSearch bf_search_ac_global = {
+ ac_init_find_all,
+ ac_find_all_non_overlapping,
+ ac_clean_find_all
+};
+
+static BinaryFindSearch bf_search_ac_single = {
+ ac_init_find_first_match,
+ ac_find_first_match,
+ NULL
+};
+
+static BinaryFindSearch bf_search_bm_global = {
+ bm_init_find_all,
+ bm_find_all_non_overlapping,
+ bm_clean_find_all
+};
+
+static BinaryFindSearch bf_search_bm_single = {
+ bm_init_find_first_match,
+ bm_find_first_match,
+ NULL
+};
+
+static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found,
+ BinaryFindResult single, BinaryFindResult global,
+ Binary *pat_bin)
+{
+ ctx->exported = 0;
+ ctx->state = BFSearch;
+ ctx->not_found = not_found;
+ if (ctx->flags & BF_FLAG_GLOBAL) {
+ ctx->found = global;
+ if (ctx->pat_type == am_bm) {
+ ctx->search = &bf_search_bm_global;
+ ctx->loop_factor = BM_LOOP_FACTOR;
+ } else if (ctx->pat_type == am_ac) {
+ ctx->search = &bf_search_ac_global;
+ ctx->loop_factor = AC_LOOP_FACTOR;
+ }
+ } else {
+ ctx->found = single;
+ if (ctx->pat_type == am_bm) {
+ ctx->search = &bf_search_bm_single;
+ ctx->loop_factor = BM_LOOP_FACTOR;
+ } else if (ctx->pat_type == am_ac) {
+ ctx->search = &bf_search_ac_single;
+ ctx->loop_factor = AC_LOOP_FACTOR;
+ }
+ }
+ ctx->trap_term = THE_NON_VALUE;
+ ctx->pat_bin = pat_bin;
+ ctx->search->init(ctx);
+}
-typedef struct BinaryFindState_bignum {
- Eterm bignum_hdr;
- BinaryFindState bfs;
- union {
- BMFindFirstState bmffs;
- BMFindAllState bmfas;
- ACFindFirstState acffs;
- ACFindAllState acfas;
- } data;
-} BinaryFindState_bignum;
-
-#define SIZEOF_BINARY_FIND_STATE(S) \
- (sizeof(BinaryFindState)+sizeof(S))
-
-#define SIZEOF_BINARY_FIND_ALL_STATE(S) \
- (sizeof(BinaryFindState)+sizeof(S)+(sizeof(FindallData)*(S).m))
-
-static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs);
-static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs,
- Sint pos, Sint len);
-static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs,
- FindallData *fad, Uint fad_sz);
-static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs);
-static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs,
- Sint pos, Sint len);
-static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs,
- FindallData *fad, Uint fad_sz);
+static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src)
+{
+ Binary *ctx_bin;
+ BinaryFindContext *ctx;
+ Eterm *hp;
+
+ ASSERT(src->exported == 0);
+ ctx_bin = erts_create_magic_binary(sizeof(BinaryFindContext),
+ bf_context_destructor);
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ sys_memcpy(ctx, src, sizeof(BinaryFindContext));
+ if (ctx->pat_bin != NULL && ctx->pat_term == THE_NON_VALUE) {
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE * 2);
+ ctx->pat_term = erts_mk_magic_ref(&hp, &MSO(p), ctx->pat_bin);
+ } else {
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ }
+ ctx->trap_term = erts_mk_magic_ref(&hp, &MSO(p), ctx_bin);
+ ctx->exported = 1;
+ return ctx;
+}
+
+static int bf_context_destructor(Binary *ctx_bin)
+{
+ BinaryFindContext *ctx;
+
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ if (ctx->state != BFDone) {
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ }
+ return 1;
+}
+
+#ifdef HARDDEBUG
+static void bf_context_dump(BinaryFindContext *ctx)
+{
+ if (ctx->pat_type == am_bm) {
+ BMData *bm;
+ bm = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ dump_bm_data(bm);
+ } else {
+ ACTrie *act;
+ act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin);
+ dump_ac_trie(act);
+ }
+}
+#endif
+
+static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp);
+
+static BFReturn maybe_binary_match_compile(BinaryFindContext *ctx, Eterm arg, Binary **pat_bin)
+{
+ Eterm *tp;
+ ctx->pat_term = THE_NON_VALUE;
+ if (is_tuple(arg)) {
+ tp = tuple_val(arg);
+ if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
+ return BF_BADARG;
+ }
+ if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
+ !is_internal_magic_ref(tp[2])) {
+ return BF_BADARG;
+ }
+ *pat_bin = erts_magic_ref2bin(tp[2]);
+ if ((tp[1] == am_bm &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_bm) ||
+ (tp[1] == am_ac &&
+ ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_ac)) {
+ *pat_bin = NULL;
+ return BF_BADARG;
+ }
+ ctx->pat_type = tp[1];
+ ctx->pat_term = tp[2];
+ } else if (do_binary_match_compile(arg, &(ctx->pat_type), pat_bin) != 0) {
+ return BF_BADARG;
+ }
+ return BF_OK;
+}
static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
{
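maybe_binary_match_compile above type-checks a precompiled {bm|ac, Ref} pattern by comparing the magic binary's destructor pointer, ERTS's standing idiom for tagging magic-binary payload types. A generic sketch of the idea, with hypothetical names:

    #include <stdio.h>

    typedef struct { int (*destructor)(void *); char data[64]; } MagicBin;

    static int bm_dtor(void *p) { (void)p; return 1; }
    static int ac_dtor(void *p) { (void)p; return 1; }

    static void *checked_data(MagicBin *mb, int (*expected)(void *))
    {
        /* Wrong destructor => the ref points at some other kind of magic
         * binary, so reject it instead of reinterpreting the payload. */
        return mb->destructor == expected ? (void *)mb->data : NULL;
    }

    int main(void)
    {
        MagicBin bin = { bm_dtor, {0} };
        printf("%s\n", checked_data(&bin, bm_dtor) ? "bm pattern" : "reject");
        printf("%s\n", checked_data(&bin, ac_dtor) ? "ac pattern" : "reject");
        return 0;
    }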
@@ -1141,17 +1275,17 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin
Uint orig_size;
if (is_atom(t)) {
if (t == am_global) {
- *optp |= BINARY_FIND_ALL;
+ *optp |= BF_FLAG_GLOBAL;
l = CDR(list_val(l));
continue;
}
if (t == am_trim) {
- *optp |= BINARY_SPLIT_TRIM;
+ *optp |= BF_FLAG_SPLIT_TRIM;
l = CDR(list_val(l));
continue;
}
if (t == am_trim_all) {
- *optp |= BINARY_SPLIT_TRIM_ALL;
+ *optp |= BF_FLAG_SPLIT_TRIM_ALL;
l = CDR(list_val(l));
continue;
}
@@ -1204,266 +1338,160 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin
}
}
-static int do_binary_find(Process *p, Eterm subject, BinaryFindState *bfs, Binary *bin,
- Eterm state_term, Eterm *res_term)
+static BFReturn do_binary_find(Process *p, Eterm subject, BinaryFindContext **ctxp,
+ Binary *pat_bin, Binary *ctx_bin, Eterm *res_term)
{
- byte *bytes;
- Uint bitoffs, bitsize;
- byte *temp_alloc = NULL;
- BinaryFindState_bignum *state_ptr = NULL;
+ BinaryFindContext *ctx;
+ int is_first_call;
+ Uint initial_reds;
+ BFReturn runres;
- ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
- if (bitsize != 0) {
- goto badarg;
- }
- if (bitoffs != 0) {
- bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
- }
- if (state_term != NIL) {
- state_ptr = (BinaryFindState_bignum *)(big_val(state_term));
- bfs = &(state_ptr->bfs);
+ if (ctx_bin == NULL) {
+ is_first_call = 1;
+ ctx = *ctxp;
+ } else {
+ is_first_call = 0;
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_bin);
+ ctx->pat_bin = pat_bin;
+ *ctxp = ctx;
}
- if (bfs->flags & BINARY_FIND_ALL) {
- if (bfs->type == am_bm) {
- BMData *bm;
- Sint pos;
- BMFindAllState state;
- Uint reds = get_reds(p, BM_LOOP_FACTOR);
- Uint save_reds = reds;
+ initial_reds = ctx->reds = get_reds(p, ctx->loop_factor);
- bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_bm_data(bm);
-#endif
- if (state_term == NIL) {
- bm_init_find_all(&state, bfs->hsstart, bfs->hsend);
- } else {
- bm_restore_find_all(&state, &(state_ptr->data.bmfas));
- }
+ switch (ctx->state) {
+ case BFSearch: {
+ byte *bytes;
+ Uint bitoffs, bitsize;
+ byte *temp_alloc = NULL;
- pos = bm_find_all_non_overlapping(&state, bm, bytes, &reds);
- if (pos == BM_NOT_FOUND) {
- *res_term = bfs->not_found_result(p, subject, bfs);
- } else if (pos == BM_RESTART) {
- int x =
- (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm));
-#ifdef HARDDEBUG
- erts_printf("Trap bm!\n");
-#endif
- state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1);
- state_ptr->bignum_hdr = make_pos_bignum_header(x);
- memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState));
- bm_serialize_find_all(&state, &state_ptr->data.bmfas);
- *res_term = make_big(&state_ptr->bignum_hdr);
- erts_free_aligned_binary_bytes(temp_alloc);
- bm_clean_find_all(&state);
- return DO_BIN_MATCH_RESTART;
- } else {
- *res_term = bfs->global_result(p, subject, bfs, state.out, state.m);
- }
- erts_free_aligned_binary_bytes(temp_alloc);
- bm_clean_find_all(&state);
- BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
- return DO_BIN_MATCH_OK;
- } else if (bfs->type == am_ac) {
- ACTrie *act;
- int acr;
- ACFindAllState state;
- Uint reds = get_reds(p, AC_LOOP_FACTOR);
- Uint save_reds = reds;
-
- act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
+ ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto badarg;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc);
+ }
#ifdef HARDDEBUG
- dump_ac_trie(act);
+ bf_context_dump(ctx);
#endif
- if (state_term == NIL) {
- ac_init_find_all(&state, act, bfs->hsstart, bfs->hsend);
- } else {
- ac_restore_find_all(&state, &(state_ptr->data.acfas));
- }
- acr = ac_find_all_non_overlapping(&state, bytes, &reds);
- if (acr == AC_NOT_FOUND) {
- *res_term = bfs->not_found_result(p, subject, bfs);
- } else if (acr == AC_RESTART) {
- int x =
- (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm));
+ runres = ctx->search->find(ctx, bytes);
+ if (runres == BF_NOT_FOUND) {
+ *res_term = ctx->not_found(p, subject, &ctx);
+ *ctxp = ctx;
+ } else if (runres == BF_RESTART) {
#ifdef HARDDEBUG
+ if (ctx->pat_type == am_ac) {
erts_printf("Trap ac!\n");
-#endif
- state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1);
- state_ptr->bignum_hdr = make_pos_bignum_header(x);
- memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState));
- ac_serialize_find_all(&state, &state_ptr->data.acfas);
- *res_term = make_big(&state_ptr->bignum_hdr);
- erts_free_aligned_binary_bytes(temp_alloc);
- ac_clean_find_all(&state);
- return DO_BIN_MATCH_RESTART;
} else {
- *res_term = bfs->global_result(p, subject, bfs, state.out, state.m);
- }
- erts_free_aligned_binary_bytes(temp_alloc);
- ac_clean_find_all(&state);
- BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
- return DO_BIN_MATCH_OK;
- }
- } else {
- if (bfs->type == am_bm) {
- BMData *bm;
- Sint pos;
- BMFindFirstState state;
- Uint reds = get_reds(p, BM_LOOP_FACTOR);
- Uint save_reds = reds;
-
- bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_bm_data(bm);
-#endif
- if (state_term == NIL) {
- bm_init_find_first_match(&state, bfs->hsstart, bfs->hsend);
- } else {
- memcpy(&state, &state_ptr->data.bmffs, sizeof(BMFindFirstState));
- }
-
-#ifdef HARDDEBUG
- erts_printf("(bm) state->pos = %ld, state->len = %lu\n",state.pos,
- state.len);
-#endif
- pos = bm_find_first_match(&state, bm, bytes, &reds);
- if (pos == BM_NOT_FOUND) {
- *res_term = bfs->not_found_result(p, subject, bfs);
- } else if (pos == BM_RESTART) {
- int x =
- (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm));
-#ifdef HARDDEBUG
erts_printf("Trap bm!\n");
+ }
#endif
- state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1);
- state_ptr->bignum_hdr = make_pos_bignum_header(x);
- memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState));
- memcpy(&state_ptr->data.acffs, &state, sizeof(BMFindFirstState));
- *res_term = make_big(&state_ptr->bignum_hdr);
- erts_free_aligned_binary_bytes(temp_alloc);
- return DO_BIN_MATCH_RESTART;
- } else {
- *res_term = bfs->single_result(p, subject, bfs, pos, bm->len);
+ if (is_first_call) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ erts_set_gc_state(p, 0);
}
erts_free_aligned_binary_bytes(temp_alloc);
- BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR);
- return DO_BIN_MATCH_OK;
- } else if (bfs->type == am_ac) {
- ACTrie *act;
- Uint pos, rlen;
- int acr;
- ACFindFirstState state;
- Uint reds = get_reds(p, AC_LOOP_FACTOR);
- Uint save_reds = reds;
-
- act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin);
-#ifdef HARDDEBUG
- dump_ac_trie(act);
-#endif
- if (state_term == NIL) {
- ac_init_find_first_match(&state, act, bfs->hsstart, bfs->hsend);
- } else {
- memcpy(&state, &state_ptr->data.acffs, sizeof(ACFindFirstState));
+ *res_term = THE_NON_VALUE;
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
+ } else {
+ *res_term = ctx->found(p, subject, &ctx);
+ *ctxp = ctx;
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (*res_term == THE_NON_VALUE) {
+ if (is_first_call) {
+ erts_set_gc_state(p, 0);
}
- acr = ac_find_first_match(&state, bytes, &pos, &rlen, &reds);
- if (acr == AC_NOT_FOUND) {
- *res_term = bfs->not_found_result(p, subject, bfs);
- } else if (acr == AC_RESTART) {
- int x =
- (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) +
- !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm));
-#ifdef HARDDEBUG
- erts_printf("Trap ac!\n");
-#endif
- state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1);
- state_ptr->bignum_hdr = make_pos_bignum_header(x);
- memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState));
- memcpy(&state_ptr->data.acffs, &state, sizeof(ACFindFirstState));
- *res_term = make_big(&state_ptr->bignum_hdr);
- erts_free_aligned_binary_bytes(temp_alloc);
- return DO_BIN_MATCH_RESTART;
- } else {
- *res_term = bfs->single_result(p, subject, bfs, pos, rlen);
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
+ }
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ if (!is_first_call) {
+ erts_set_gc_state(p, 1);
+ }
+ BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor);
+ return BF_OK;
+ }
+ case BFResult: {
+ *res_term = ctx->found(p, subject, &ctx);
+ *ctxp = ctx;
+ if (*res_term == THE_NON_VALUE) {
+ if (is_first_call) {
+ erts_set_gc_state(p, 0);
}
- erts_free_aligned_binary_bytes(temp_alloc);
- BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR);
- return DO_BIN_MATCH_OK;
+ BUMP_ALL_REDS(p);
+ return BF_RESTART;
}
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ if (!is_first_call) {
+ erts_set_gc_state(p, 1);
+ }
+ BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor);
+ return BF_OK;
}
- badarg:
- return DO_BIN_MATCH_BADARG;
+ default:
+ ASSERT(!"Unknown state in do_binary_find");
+ }
+
+badarg:
+ if (!is_first_call) {
+ if (ctx->search->done != NULL) {
+ ctx->search->done(ctx);
+ }
+ ctx->state = BFDone;
+ erts_set_gc_state(p, 1);
+ }
+ return BF_BADARG;
}
static BIF_RETTYPE
binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags)
{
- BinaryFindState bfs;
- Eterm *tp;
- Binary *bin;
- Eterm bin_term = NIL;
+ BinaryFindContext c_buff;
+ BinaryFindContext *ctx = &c_buff;
+ Binary *pat_bin;
int runres;
Eterm result;
- if (is_not_binary(arg1)) {
+ if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) {
goto badarg;
}
- bfs.flags = flags;
- if (parse_match_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend))) {
+ ctx->flags = flags;
+ if (parse_match_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend))) {
goto badarg;
}
- if (bfs.hsend == 0) {
- BIF_RET(do_match_not_found_result(p, arg1, &bfs));
+ if (ctx->hsend == 0) {
+ result = do_match_not_found_result(p, arg1, &ctx);
+ BIF_RET(result);
}
- if (is_tuple(arg2)) {
- tp = tuple_val(arg2);
- if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
- goto badarg;
- }
- if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
- goto badarg;
- }
- bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
- if (bfs.type == am_bm &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
- goto badarg;
- }
- if (bfs.type == am_ac &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
- goto badarg;
- }
- bin_term = tp[2];
- } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) {
+ if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) {
goto badarg;
}
- bfs.not_found_result = &do_match_not_found_result;
- bfs.single_result = &do_match_single_result;
- bfs.global_result = &do_match_global_result;
- runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
- if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
- } else if (bin_term == NIL) {
- erts_bin_free(bin);
+ bf_context_init(ctx, do_match_not_found_result, do_match_single_result,
+ do_match_global_result, pat_bin);
+ runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result);
+ if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) {
+ erts_bin_free(pat_bin);
}
switch (runres) {
- case DO_BIN_MATCH_OK:
+ case BF_OK:
BIF_RET(result);
- case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(p);
- BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term);
+ case BF_RESTART:
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term);
default:
goto badarg;
}
- badarg:
- BIF_ERROR(p,BADARG);
+badarg:
+ BIF_ERROR(p, BADARG);
}
BIF_RETTYPE binary_match_2(BIF_ALIST_2)
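The rewritten do_binary_find runs as a reduction-budgeted state machine (BFSearch, then BFResult, then BFDone): when the budget runs out it exports the context into a magic binary, pauses garbage collection via erts_set_gc_state while the search is suspended, and the BIF traps on ctx->trap_term to resume later. The control shape, reduced to a standalone model (not ERTS code):

    #include <stdio.h>

    typedef struct { unsigned long i, sum, limit; } Ctx;

    enum { DONE, YIELD };

    static int sum_some(Ctx *c, unsigned budget)
    {
        while (c->i <= c->limit) {
            if (budget-- == 0)
                return YIELD;      /* state already in *c; resume later */
            c->sum += c->i++;
        }
        return DONE;
    }

    int main(void)
    {
        Ctx c = {1, 0, 100000};
        int traps = 0;
        while (sum_some(&c, 4000) == YIELD)
            traps++;               /* a real BIF would BIF_TRAP3 here */
        printf("sum=%lu after %d traps\n", c.sum, traps);
        return 0;
    }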
@@ -1478,76 +1506,52 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
{
- return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BINARY_FIND_ALL);
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BF_FLAG_GLOBAL);
}
BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
{
- return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BINARY_FIND_ALL);
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BF_FLAG_GLOBAL);
}
static BIF_RETTYPE
binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
{
- BinaryFindState bfs;
- Eterm *tp;
- Binary *bin;
- Eterm bin_term = NIL;
+ BinaryFindContext c_buff;
+ BinaryFindContext *ctx = &c_buff;
+ Binary *pat_bin;
int runres;
Eterm result;
- if (is_not_binary(arg1)) {
+ if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) {
goto badarg;
}
- if (parse_split_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend), &(bfs.flags))) {
+ if (parse_split_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend), &(ctx->flags))) {
goto badarg;
}
- if (bfs.hsend == 0) {
- result = do_split_not_found_result(p, arg1, &bfs);
+ if (ctx->hsend == 0) {
+ result = do_split_not_found_result(p, arg1, &ctx);
BIF_RET(result);
}
- if (is_tuple(arg2)) {
- tp = tuple_val(arg2);
- if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
- goto badarg;
- }
- if (((tp[1] != am_bm) && (tp[1] != am_ac)) ||
- !ERTS_TERM_IS_MAGIC_BINARY(tp[2])) {
- goto badarg;
- }
- bfs.type = tp[1];
- bin = ((ProcBin *) binary_val(tp[2]))->val;
- if (bfs.type == am_bm &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) {
- goto badarg;
- }
- if (bfs.type == am_ac &&
- ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) {
- goto badarg;
- }
- bin_term = tp[2];
- } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) {
+ if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) {
goto badarg;
}
- bfs.not_found_result = &do_split_not_found_result;
- bfs.single_result = &do_split_single_result;
- bfs.global_result = &do_split_global_result;
- runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result);
- if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
- } else if (bin_term == NIL) {
- erts_bin_free(bin);
- }
- switch(runres) {
- case DO_BIN_MATCH_OK:
+ bf_context_init(ctx, do_split_not_found_result, do_split_single_result,
+ do_split_global_result, pat_bin);
+ runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result);
+ if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) {
+ erts_bin_free(pat_bin);
+ }
+ switch (runres) {
+ case BF_OK:
BIF_RET(result);
- case DO_BIN_MATCH_RESTART:
- BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term);
+ case BF_RESTART:
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term);
default:
goto badarg;
}
- badarg:
+badarg:
BIF_ERROR(p, BADARG);
}
@@ -1561,72 +1565,117 @@ BIF_RETTYPE binary_split_3(BIF_ALIST_3)
return binary_split(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
-static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs)
+static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
- if (bfs->flags & BINARY_FIND_ALL) {
+ if ((*ctxp)->flags & BF_FLAG_GLOBAL) {
return NIL;
} else {
return am_nomatch;
}
}
-static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs,
- Sint pos, Sint len)
+static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindFirstContext *ff = &(ctx->u.ff);
Eterm erlen;
Eterm *hp;
Eterm ret;
- erlen = erts_make_integer((Uint)(len), p);
- ret = erts_make_integer(pos, p);
+ erlen = erts_make_integer((Uint)(ff->len), p);
+ ret = erts_make_integer(ff->pos, p);
hp = HAlloc(p, 3);
ret = TUPLE2(hp, ret, erlen);
return ret;
}
-static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs,
- FindallData *fad, Uint fad_sz)
+static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
- Sint i;
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindAllContext *fa = &(ctx->u.fa);
+ FindallData *fad;
Eterm tpl;
- Eterm *hp;
- Eterm ret;
+ Sint i;
+ register Uint reds = ctx->reds;
- for (i = 0; i < fad_sz; ++i) {
- fad[i].epos = erts_make_integer(fad[i].pos, p);
- fad[i].elen = erts_make_integer(fad[i].len, p);
+ if (ctx->state == BFSearch) {
+ if (ctx->pat_type == am_ac) {
+ fa->data = fa->d.ac.out;
+ fa->size = fa->d.ac.m;
+ } else {
+ fa->data = fa->d.bm.out;
+ fa->size = fa->d.bm.m;
+ }
+ fa->tail = fa->size - 1;
+ fa->head = 0;
+ fa->end_pos = 0;
+ fa->term = NIL;
+ if (ctx->exported == 0 && ((fa->size * 2) >= reds)) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ fa = &(ctx->u.fa);
+ }
+ erts_factory_proc_prealloc_init(&(fa->factory), p, fa->size * (3 + 2));
+ ctx->state = BFResult;
+ }
+
+ fad = fa->data;
+
+ if (fa->end_pos == 0) {
+ for (i = fa->head; i < fa->size; ++i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ return THE_NON_VALUE;
+ }
+ fad[i].epos = erts_make_integer(fad[i].pos, p);
+ fad[i].elen = erts_make_integer(fad[i].len, p);
+ }
+ fa->end_pos = 1;
+ fa->head = fa->tail;
}
- hp = HAlloc(p, fad_sz * (3 + 2));
- ret = NIL;
- for (i = fad_sz - 1; i >= 0; --i) {
- tpl = TUPLE2(hp, fad[i].epos, fad[i].elen);
- hp += 3;
- ret = CONS(hp, tpl, ret);
- hp += 2;
+
+ for (i = fa->head; i >= 0; --i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ return THE_NON_VALUE;
+ }
+ tpl = TUPLE2(fa->factory.hp, fad[i].epos, fad[i].elen);
+ fa->factory.hp += 3;
+ fa->term = CONS(fa->factory.hp, tpl, fa->term);
+ fa->factory.hp += 2;
}
+ ctx->reds = reds;
+ erts_factory_close(&(fa->factory));
- return ret;
+ return fa->term;
}
-static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs)
+static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
+ BinaryFindContext *ctx = (*ctxp);
Eterm *hp;
Eterm ret;
- if (bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL)
+ if (ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)
&& binary_size(subject) == 0) {
- return NIL;
+ return NIL;
}
hp = HAlloc(p, 2);
ret = CONS(hp, subject, NIL);
-
return ret;
}
-static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs,
- Sint pos, Sint len)
+static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindFirstContext *ff = &(ctx->u.ff);
+ Sint pos;
+ Sint len;
size_t orig_size;
Eterm orig;
Uint offset;
@@ -1637,9 +1686,12 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *
Eterm *hp;
Eterm ret;
+ pos = ff->pos;
+ len = ff->len;
+
orig_size = binary_size(subject);
- if ((bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL)) &&
+ if ((ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)) &&
(orig_size - pos - len) == 0) {
if (pos == 0) {
ret = NIL;
@@ -1660,7 +1712,7 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *
hp += 2;
}
} else {
- if ((bfs->flags & BINARY_SPLIT_TRIM_ALL) && (pos == 0)) {
+ if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) {
hp = HAlloc(p, 1 * (ERL_SUB_BIN_SIZE + 2));
ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
sb1 = NULL;
@@ -1698,39 +1750,60 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *
return ret;
}
-static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs,
- FindallData *fad, Uint fad_sz)
+static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp)
{
- size_t orig_size;
+ BinaryFindContext *ctx = (*ctxp);
+ BinaryFindAllContext *fa = &(ctx->u.fa);
+ FindallData *fad;
Eterm orig;
+ size_t orig_size;
Uint offset;
Uint bit_offset;
Uint bit_size;
ErlSubBin *sb;
+ Uint do_trim;
Sint i;
- Sint tail;
- Uint list_size;
- Uint end_pos;
- Uint do_trim = bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL);
- Eterm *hp;
- Eterm *hendp;
- Eterm ret;
+ register Uint reds = ctx->reds;
- tail = fad_sz - 1;
- list_size = fad_sz + 1;
- orig_size = binary_size(subject);
- end_pos = (Uint)(orig_size);
+ if (ctx->state == BFSearch) {
+ if (ctx->pat_type == am_ac) {
+ fa->data = fa->d.ac.out;
+ fa->size = fa->d.ac.m;
+ } else {
+ fa->data = fa->d.bm.out;
+ fa->size = fa->d.bm.m;
+ }
+ fa->tail = fa->size - 1;
+ fa->head = fa->tail;
+ orig_size = binary_size(subject);
+ fa->end_pos = (Uint)(orig_size);
+ fa->term = NIL;
+ if (ctx->exported == 0 && ((fa->head + 1) >= reds)) {
+ ctx = bf_context_export(p, ctx);
+ *ctxp = ctx;
+ fa = &(ctx->u.fa);
+ }
+ erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) * (ERL_SUB_BIN_SIZE + 2));
+ ctx->state = BFResult;
+ }
- hp = HAlloc(p, list_size * (ERL_SUB_BIN_SIZE + 2));
- hendp = hp + list_size * (ERL_SUB_BIN_SIZE + 2);
ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size);
ASSERT(bit_size == 0);
+ fad = fa->data;
+ do_trim = ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL);
- ret = NIL;
-
- for (i = tail; i >= 0; --i) {
- sb = (ErlSubBin *)(hp);
- sb->size = end_pos - (fad[i].pos + fad[i].len);
+ for (i = fa->head; i >= 0; --i) {
+ if (--reds == 0) {
+ ASSERT(ctx->exported == 1);
+ fa->head = i;
+ ctx->reds = reds;
+ if (!do_trim && (ctx->flags & BF_FLAG_SPLIT_TRIM)) {
+ ctx->flags &= ~BF_FLAG_SPLIT_TRIM;
+ }
+ return THE_NON_VALUE;
+ }
+ sb = (ErlSubBin *)(fa->factory.hp);
+ sb->size = fa->end_pos - (fad[i].pos + fad[i].len);
if (!(sb->size == 0 && do_trim)) {
sb->thing_word = HEADER_SUB_BIN;
sb->offs = offset + fad[i].pos + fad[i].len;
@@ -1738,15 +1811,18 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *
sb->bitoffs = bit_offset;
sb->bitsize = 0;
sb->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
- ret = CONS(hp, make_binary(sb), ret);
- hp += 2;
- do_trim &= ~BINARY_SPLIT_TRIM;
+ fa->factory.hp += ERL_SUB_BIN_SIZE;
+ fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
+ fa->factory.hp += 2;
+ do_trim &= ~BF_FLAG_SPLIT_TRIM;
}
- end_pos = fad[i].pos;
+ fa->end_pos = fad[i].pos;
}
- sb = (ErlSubBin *)(hp);
+ fa->head = i;
+ ctx->reds = reds;
+
+ sb = (ErlSubBin *)(fa->factory.hp);
sb->size = fad[0].pos;
if (!(sb->size == 0 && do_trim)) {
sb->thing_word = HEADER_SUB_BIN;
@@ -1755,25 +1831,31 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *
sb->bitoffs = bit_offset;
sb->bitsize = 0;
sb->is_writable = 0;
- hp += ERL_SUB_BIN_SIZE;
- ret = CONS(hp, make_binary(sb), ret);
- hp += 2;
+ fa->factory.hp += ERL_SUB_BIN_SIZE;
+ fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term);
+ fa->factory.hp += 2;
}
- HRelease(p, hendp, hp);
- return ret;
+ erts_factory_close(&(fa->factory));
+
+ return fa->term;
}
static BIF_RETTYPE binary_find_trap(BIF_ALIST_3)
{
int runres;
Eterm result;
- Binary *bin = ((ProcBin *) binary_val(BIF_ARG_3))->val;
- runres = do_binary_find(BIF_P, BIF_ARG_1, NULL, bin, BIF_ARG_2, &result);
- if (runres == DO_BIN_MATCH_OK) {
+ Binary *ctx_bin = erts_magic_ref2bin(BIF_ARG_2);
+ Binary *pat_bin = erts_magic_ref2bin(BIF_ARG_3);
+ BinaryFindContext *ctx = NULL;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == bf_context_destructor);
+ runres = do_binary_find(BIF_P, BIF_ARG_1, &ctx, pat_bin, ctx_bin, &result);
+ if (runres == BF_OK) {
+ ASSERT(result != THE_NON_VALUE);
BIF_RET(result);
} else {
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, result, BIF_ARG_3);
+ ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result);
+ BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
}
@@ -2066,7 +2148,7 @@ static int do_search_backward(CommonData *cd, Uint *posp, Uint *redsp)
}
}
-static void cleanup_common_data(Binary *bp)
+static int cleanup_common_data(Binary *bp)
{
int i;
CommonData *cd;
@@ -2083,7 +2165,7 @@ static void cleanup_common_data(Binary *bp)
break;
}
}
- return;
+ return 1;
}
static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
@@ -2182,12 +2264,12 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
if (cd[i].type == CL_TYPE_HEAP_NOALLOC) {
unsigned char *tmp = cd[i].buff;
cd[i].buff = erts_alloc(ERTS_ALC_T_BINARY_BUFFER, cd[i].bufflen);
- memcpy(cd[i].buff,tmp,cd[i].bufflen);
+ sys_memcpy(cd[i].buff,tmp,cd[i].bufflen);
cd[i].type = CL_TYPE_HEAP;
}
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ bin_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP3(trapper, p, bin_term, epos,list);
}
@@ -2214,8 +2296,7 @@ static BIF_RETTYPE do_longest_common_trap(Process *p, Eterm bin_term, Eterm curr
#else
term_to_Uint(current_pos, &pos);
#endif
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin_term));
- bin = ((ProcBin *) binary_val(bin_term))->val;
+ bin = erts_magic_ref2bin(bin_term);
cd = (CommonData *) ERTS_MAGIC_BIN_DATA(bin);
if (direction == DIRECTION_PREFIX) {
trapper = &binary_longest_prefix_trap_export;
@@ -2353,191 +2434,6 @@ BIF_RETTYPE binary_at_2(BIF_ALIST_2)
BIF_ERROR(BIF_P,BADARG);
}
-#define BIN_TO_LIST_OK 0
-#define BIN_TO_LIST_TRAP 1
-/* No badarg, checked before call */
-
-#define BIN_TO_LIST_LOOP_FACTOR 10
-
-static int do_bin_to_list(Process *p, byte *bytes, Uint bit_offs,
- Uint start, Sint *lenp, Eterm *termp)
-{
- Uint reds = get_reds(p, BIN_TO_LIST_LOOP_FACTOR); /* reds can never be 0 */
- Uint len = *lenp;
- Uint loops;
- Eterm *hp;
- Eterm term = *termp;
- Uint n;
-
- ASSERT(reds > 0);
-
- loops = MIN(reds,len);
-
- BUMP_REDS(p, loops / BIN_TO_LIST_LOOP_FACTOR);
-
- hp = HAlloc(p,2*loops);
- while (loops--) {
- --len;
- if (bit_offs) {
- n = ((((Uint) bytes[start+len]) << bit_offs) |
- (((Uint) bytes[start+len+1]) >> (8-bit_offs))) & 0xFF;
- } else {
- n = bytes[start+len];
- }
-
- term = CONS(hp,make_small(n),term);
- hp +=2;
- }
- *termp = term;
- *lenp = len;
- if (len) {
- BUMP_ALL_REDS(p);
- return BIN_TO_LIST_TRAP;
- }
- return BIN_TO_LIST_OK;
-}
-
-
-static BIF_RETTYPE do_trap_bin_to_list(Process *p, Eterm binary,
- Uint start, Sint len, Eterm sofar)
-{
- Eterm *hp;
- Eterm blob;
-
- hp = HAlloc(p,3);
- hp[0] = make_pos_bignum_header(2);
- hp[1] = start;
- hp[2] = (Uint) len;
- blob = make_big(hp);
- BIF_TRAP3(&binary_bin_to_list_trap_export, p, binary, blob, sofar);
-}
-
-static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3)
-{
- Eterm *ptr;
- Uint start;
- Sint len;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = BIF_ARG_3;
-
- ptr = big_val(BIF_ARG_2);
- start = ptr[1];
- len = (Sint) ptr[2];
-
- ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
- if (do_bin_to_list(BIF_P, bytes, bit_offs, start, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(BIF_P,BIF_ARG_1,start,len,res);
-}
-
-static BIF_RETTYPE binary_bin_to_list_common(Process *p,
- Eterm bin,
- Eterm epos,
- Eterm elen)
-{
- Uint pos;
- Sint len;
- size_t sz;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = NIL;
-
- if (is_not_binary(bin)) {
- goto badarg;
- }
- if (!term_to_Uint(epos, &pos)) {
- goto badarg;
- }
- if (!term_to_Sint(elen, &len)) {
- goto badarg;
- }
- if (len < 0) {
- Uint lentmp = -(Uint)len;
- /* overflow */
- if ((Sint)lentmp < 0) {
- goto badarg;
- }
- len = lentmp;
- if (len > pos) {
- goto badarg;
- }
- pos -= len;
- }
- /* overflow */
- if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
- goto badarg;
- }
- sz = binary_size(bin);
-
- if (pos+len > sz) {
- goto badarg;
- }
- ERTS_GET_BINARY_BYTES(bin,bytes,bit_offs,bit_size);
- if (bit_size != 0) {
- goto badarg;
- }
- if(do_bin_to_list(p, bytes, bit_offs, pos, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(p,bin,pos,len,res);
-
- badarg:
- BIF_ERROR(p,BADARG);
-}
-
-BIF_RETTYPE binary_bin_to_list_3(BIF_ALIST_3)
-{
- return binary_bin_to_list_common(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3);
-}
-
-BIF_RETTYPE binary_bin_to_list_2(BIF_ALIST_2)
-{
- Eterm *tp;
-
- if (is_not_tuple(BIF_ARG_2)) {
- goto badarg;
- }
- tp = tuple_val(BIF_ARG_2);
- if (arityval(*tp) != 2) {
- goto badarg;
- }
- return binary_bin_to_list_common(BIF_P,BIF_ARG_1,tp[1],tp[2]);
- badarg:
- BIF_ERROR(BIF_P,BADARG);
-}
-
-BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
-{
- Uint pos = 0;
- Sint len;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = NIL;
-
- if (is_not_binary(BIF_ARG_1)) {
- goto badarg;
- }
- len = binary_size(BIF_ARG_1);
- ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
- if (bit_size != 0) {
- goto badarg;
- }
- if(do_bin_to_list(BIF_P, bytes, bit_offs, pos, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(BIF_P,BIF_ARG_1,pos,len,res);
- badarg:
- BIF_ERROR(BIF_P,BADARG);
-}
-
HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
@@ -2563,7 +2459,7 @@ typedef struct {
#define BINARY_COPY_LOOP_FACTOR 100
-static void cleanup_copy_bin_state(Binary *bp)
+static int cleanup_copy_bin_state(Binary *bp)
{
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(bp);
if (cbs->result != NULL) {
@@ -2583,6 +2479,7 @@ static void cleanup_copy_bin_state(Binary *bp)
break;
}
cbs->source_type = BC_TYPE_EMPTY;
+ return 1;
}
/*
@@ -2659,7 +2556,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
0);
if (!(cbs->temp_alloc)) { /* alignment not needed, need to copy */
byte *tmp = erts_alloc(ERTS_ALC_T_BINARY_BUFFER,size);
- memcpy(tmp,cbs->source,size);
+ sys_memcpy(tmp,cbs->source,size);
cbs->source = tmp;
cbs->source_type = BC_TYPE_HEAP;
} else {
@@ -2668,20 +2565,19 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
if trapping */
- erts_refc_init(&(cbs->result->refc), 1);
t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
pos = 0;
i = 0;
while (pos < reds) {
- memcpy(t+pos,cbs->source, size);
+ sys_memcpy(t+pos,cbs->source, size);
pos += size;
++i;
}
cbs->source_size = size;
cbs->result_pos = pos;
cbs->times_left = n-i;
- hp = HAlloc(p,PROC_BIN_SIZE);
- trap_term = erts_mk_magic_binary_term(&hp, &MSO(p), mb);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ trap_term = erts_mk_magic_ref(&hp, &MSO(p), mb);
BUMP_ALL_REDS(p);
BIF_TRAP2(&binary_copy_trap_export, p, bin, trap_term);
} else {
@@ -2698,7 +2594,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
pos = 0;
while (pos < target_size) {
- memcpy(t+pos,source, size);
+ sys_memcpy(t+pos,source, size);
pos += size;
}
erts_free_aligned_binary_bytes(temp_alloc);
@@ -2716,7 +2612,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
Uint reds = get_reds(BIF_P, BINARY_COPY_LOOP_FACTOR);
byte *t;
Uint pos;
- Binary *mb = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ Binary *mb = erts_magic_ref2bin(BIF_ARG_2);
CopyBinState *cbs = (CopyBinState *) ERTS_MAGIC_BIN_DATA(mb);
Uint opos;
@@ -2728,7 +2624,7 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
if ((n-1) * size >= reds) {
Uint i = 0;
while ((pos - opos) < reds) {
- memcpy(t+pos,cbs->source, size);
+ sys_memcpy(t+pos,cbs->source, size);
pos += size;
++i;
}
@@ -2738,28 +2634,21 @@ BIF_RETTYPE binary_copy_trap(BIF_ALIST_2)
BIF_TRAP2(&binary_copy_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2);
} else {
Binary *save;
- ProcBin* pb;
+ Eterm resbin;
Uint target_size = cbs->result->orig_size;
while (pos < target_size) {
- memcpy(t+pos,cbs->source, size);
+ sys_memcpy(t+pos,cbs->source, size);
pos += size;
}
- save = cbs->result;
+ save = cbs->result;
cbs->result = NULL;
cleanup_copy_bin_state(mb); /* now cbs is dead */
- pb = (ProcBin *) HAlloc(BIF_P, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = target_size;
- pb->next = MSO(BIF_P).first;
- MSO(BIF_P).first = (struct erl_off_heap_header*) pb;
- pb->val = save;
- pb->bytes = t;
- pb->flags = 0;
-
- OH_OVERHEAD(&(MSO(BIF_P)), target_size / sizeof(Eterm));
- BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR);
- BIF_RET(make_binary(pb));
+ resbin = erts_build_proc_bin(&MSO(BIF_P),
+ HAlloc(BIF_P, PROC_BIN_SIZE),
+ save);
+ BUMP_REDS(BIF_P,(pos - opos) / BINARY_COPY_LOOP_FACTOR);
+ BIF_RET(resbin);
}
}
@@ -3140,7 +3029,7 @@ static void dump_bm_data(BMData *bm)
static void dump_ac_node(ACNode *node, int indent, int ch) {
int i;
char *spaces = erts_alloc(ERTS_ALC_T_TMP, 10 * indent + 1);
- memset(spaces,' ',10*indent);
+ sys_memset(spaces,' ',10*indent);
spaces[10*indent] = '\0';
erts_printf("%s-> %c\n",spaces,ch);
erts_printf("%sId: %u\n",spaces,(unsigned) node->id);
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 9417803e14..cce8472ccb 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,7 +44,7 @@ void erts_init_bif_chksum(void)
{
/* Non visual BIF to trap to. */
erts_init_trap_export(&chksum_md5_2_exp,
- am_erlang, am_atom_put("md5_trap",8), 2,
+ am_erlang, ERTS_MAKE_AM("md5_trap"), 2,
&md5_2);
}
@@ -516,7 +516,7 @@ md5_2(BIF_ALIST_2)
/* No need to check the context; this function cannot be called with an
   unaligned or badly sized context, as it is always trapped to. */
bytes = binary_bytes(BIF_ARG_1);
- memcpy(&context,bytes,sizeof(MD5_CTX));
+ sys_memcpy(&context,bytes,sizeof(MD5_CTX));
rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res,
&err);
if (err != 0) {
@@ -564,7 +564,7 @@ md5_update_2(BIF_ALIST_2)
erts_free_aligned_binary_bytes(temp_alloc);
BIF_ERROR(BIF_P, BADARG);
}
- memcpy(&context,bytes,sizeof(MD5_CTX));
+ sys_memcpy(&context,bytes,sizeof(MD5_CTX));
erts_free_aligned_binary_bytes(temp_alloc);
rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res,
&err);
@@ -599,7 +599,7 @@ md5_final_1(BIF_ALIST_1)
goto error;
}
bin = erts_new_heap_binary(BIF_P, (byte *)NULL, 16, &result);
- memcpy(&ctx_copy, context, sizeof(MD5_CTX));
+ sys_memcpy(&ctx_copy, context, sizeof(MD5_CTX));
erts_free_aligned_binary_bytes(temp_alloc);
MD5Final(result, &ctx_copy);
BIF_RET(bin);
diff --git a/erts/emulator/beam/erl_bif_counters.c b/erts/emulator/beam/erl_bif_counters.c
new file mode 100644
index 0000000000..7c8884ba32
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_counters.c
@@ -0,0 +1,255 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: The implementation for 'counters' with 'write_concurrency'.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof */
+
+#include "sys.h"
+#include "export.h"
+#include "bif.h"
+#include "erl_threads.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+
+/*
+ * Each logical counter consists of one 64-bit atomic instance per scheduler
+ * plus one instance for the "base value".
+ *
+ * get() reads all atomics for the counter and returns the sum.
+ * add() reads and writes only its own scheduler specific atomic instance.
+ * put() reads all scheduler specific atomics and writes a new base value.
+ */
+#define ATOMICS_PER_COUNTER (erts_no_schedulers + 1)
+
+#define ATOMICS_PER_CACHE_LINE (ERTS_CACHE_LINE_SIZE / sizeof(erts_atomic64_t))
+
+typedef struct
+{
+ UWord arity;
+#ifdef DEBUG
+ UWord ulen;
+#endif
+ union {
+ erts_atomic64_t v[ATOMICS_PER_CACHE_LINE];
+ byte cache_line__[ERTS_CACHE_LINE_SIZE];
+ } u[1];
+} CountersRef;
+
+static int counters_destructor(Binary *mbin)
+{
+ return 1;
+}
+
+
+static UWord ERTS_INLINE div_ceil(UWord dividend, UWord divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+BIF_RETTYPE erts_internal_counters_new_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Binary* mbin;
+ UWord ui, vi, cnt;
+ Uint bytes, cache_lines;
+ Eterm* hp;
+
+ if (!term_to_UWord(BIF_ARG_1, &cnt)
+ || cnt == 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (cnt > (ERTS_UWORD_MAX / (sizeof(erts_atomic64_t)*2*ATOMICS_PER_COUNTER)))
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+
+ cache_lines = ATOMICS_PER_COUNTER * div_ceil(cnt, ATOMICS_PER_CACHE_LINE);
+ bytes = offsetof(CountersRef, u) + cache_lines * ERTS_CACHE_LINE_SIZE;
+ mbin = erts_create_magic_binary_x(bytes,
+ counters_destructor,
+ ERTS_ALC_T_ATOMICS,
+ 0);
+ p = ERTS_MAGIC_BIN_DATA(mbin);
+ p->arity = cnt;
+
+#ifdef DEBUG
+ p->ulen = cache_lines;
+#endif
+ ASSERT((byte*)&p->u[cache_lines] <= ((byte*)p + bytes));
+ for (ui=0; ui < cache_lines; ui++)
+ for (vi=0; vi < ATOMICS_PER_CACHE_LINE; vi++)
+ erts_atomic64_init_nob(&p->u[ui].v[vi], 0);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(BIF_P), mbin);
+}
+
+static ERTS_INLINE int get_ref(Eterm ref, CountersRef** pp)
+{
+ Binary* mbin;
+ if (!is_internal_magic_ref(ref))
+ return 0;
+
+ mbin = erts_magic_ref2bin(ref);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != counters_destructor)
+ return 0;
+ *pp = ERTS_MAGIC_BIN_DATA(mbin);
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app,
+ UWord sched_ix)
+{
+ CountersRef* p;
+ UWord ix, ui, vi;
+ if (!get_ref(ref, &p) || !term_to_UWord(index, &ix) || --ix >= p->arity)
+ return 0;
+ ui = (ix / ATOMICS_PER_CACHE_LINE) * ATOMICS_PER_COUNTER + sched_ix;
+ vi = ix % ATOMICS_PER_CACHE_LINE;
+ ASSERT(ui < p->ulen);
+ *pp = p;
+ *app = &p->u[ui].v[vi];
+ return 1;
+}
+
+static ERTS_INLINE int get_ref_my_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+ ASSERT(esdp->no > 0 && esdp->no < ATOMICS_PER_COUNTER);
+ return get_ref_cnt(ref, index, pp, app, esdp->no);
+}
+
+static ERTS_INLINE int get_ref_first_cnt(Eterm ref, Eterm index,
+ CountersRef** pp,
+ erts_atomic64_t** app)
+{
+ return get_ref_cnt(ref, index, pp, app, 0);
+}
+
+static ERTS_INLINE int get_incr(CountersRef* p, Eterm term, erts_aint64_t *valp)
+{
+ return (term_to_Sint64(term, (Sint64*)valp)
+ || term_to_Uint64(term, (Uint64*)valp));
+}
+
+static ERTS_INLINE Eterm bld_counter(Process* proc, CountersRef* p,
+ erts_aint64_t val)
+{
+ if (IS_SSMALL(val))
+ return make_small((Sint) val);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
+ Eterm* hp = HAlloc(proc, hsz);
+ return erts_sint64_to_big(val, &hp);
+ }
+}
+
+BIF_RETTYPE erts_internal_counters_get_2(BIF_ALIST_2)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc = 0;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ for (j = ATOMICS_PER_COUNTER; j ; --j) {
+ acc += erts_atomic64_read_nob(ap);
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ }
+ return bld_counter(BIF_P, p, acc);
+}
+
+BIF_RETTYPE erts_internal_counters_add_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* ap;
+ erts_aint64_t incr, sum;
+
+ if (!get_ref_my_cnt(BIF_ARG_1, BIF_ARG_2, &p, &ap)
+ || !get_incr(p, BIF_ARG_3, &incr)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ sum = incr + erts_atomic64_read_nob(ap);
+ erts_atomic64_set_nob(ap, sum);
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_put_3(BIF_ALIST_3)
+{
+ CountersRef* p;
+ erts_atomic64_t* first_ap;
+ erts_atomic64_t* ap;
+ erts_aint64_t acc;
+ erts_aint64_t val;
+ int j;
+
+ if (!get_ref_first_cnt(BIF_ARG_1, BIF_ARG_2, &p, &first_ap)
+ || !term_to_Sint64(BIF_ARG_3, &val)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ap = first_ap;
+ acc = 0;
+ j = ATOMICS_PER_COUNTER - 1;
+ do {
+ ap = (erts_atomic64_t*) ((byte*)ap + ERTS_CACHE_LINE_SIZE);
+ acc += erts_atomic64_read_nob(ap);
+ } while (--j);
+ erts_atomic64_set_nob(first_ap, val-acc);
+
+ return am_ok;
+}
+
+BIF_RETTYPE erts_internal_counters_info_1(BIF_ALIST_1)
+{
+ CountersRef* p;
+ Uint hsz = MAP2_SZ;
+ Eterm *hp;
+ UWord memory;
+ Eterm sz_val, mem_val;
+
+ if (!get_ref(BIF_ARG_1, &p))
+ BIF_ERROR(BIF_P, BADARG);
+
+ memory = erts_magic_ref2bin(BIF_ARG_1)->orig_size;
+ erts_bld_uword(NULL, &hsz, p->arity);
+ erts_bld_uword(NULL, &hsz, memory);
+
+ hp = HAlloc(BIF_P, hsz);
+ sz_val = erts_bld_uword(&hp, NULL, p->arity);
+ mem_val = erts_bld_uword(&hp, NULL, memory);
+
+ return MAP2(hp, am_memory, mem_val,
+ am_size, sz_val);
+}
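As the header comment in the new file describes, each logical counter is striped over one 64-bit atomic per scheduler plus a base slot: add() does a plain read/write of the caller's own stripe only (safe without a read-modify-write because every stripe has exactly one writer), get() sums all stripes, and put() rewrites the base slot as the wanted value minus the sum of the scheduler stripes. The following is a minimal C11 model of that layout with invented names; it omits the cache-line padding and scheduler indexing of the real code.

/* C11 model of the striped counter above (hypothetical names; the real
 * code pads stripes to cache lines and indexes by scheduler number). */
#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 9  /* slot 0 is the base slot, 1..8 model 8 schedulers */

typedef struct { _Atomic long long v[NSLOTS]; } counter;

static void add(counter *c, int slot, long long incr)
{
    /* read/store of the caller's own stripe only; no fetch_add needed
     * because each stripe has a single writer */
    long long cur = atomic_load_explicit(&c->v[slot], memory_order_relaxed);
    atomic_store_explicit(&c->v[slot], cur + incr, memory_order_relaxed);
}

static long long get(counter *c)
{
    long long acc = 0;
    for (int i = 0; i < NSLOTS; i++)
        acc += atomic_load_explicit(&c->v[i], memory_order_relaxed);
    return acc;
}

static void put(counter *c, long long val)
{
    /* new base = wanted value minus the sum of the scheduler stripes */
    long long acc = 0;
    for (int i = 1; i < NSLOTS; i++)
        acc += atomic_load_explicit(&c->v[i], memory_order_relaxed);
    atomic_store_explicit(&c->v[0], val - acc, memory_order_relaxed);
}

int main(void)
{
    counter c = {0};
    add(&c, 3, 7);
    add(&c, 5, -2);
    put(&c, 100);
    printf("%lld\n", get(&c)); /* prints 100 */
    return 0;
}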
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index ef77201544..4cda0948a0 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,13 +50,6 @@
#include "dtrace-wrapper.h"
#include "lttng-wrapper.h"
-#ifdef ERTS_SMP
-#define DDLL_SMP 1
-#else
-#define DDLL_SMP 0
-#endif
-
-
/*
* Local types
*/
@@ -107,18 +100,18 @@ static void dereference_all_processes(DE_Handle *dh);
static void restore_process_references(DE_Handle *dh);
static void ddll_no_more_references(void *vdh);
-#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock)
-#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock)
+#define lock_drv_list() erts_rwmtx_rwlock(&erts_driver_list_lock)
+#define unlock_drv_list() erts_rwmtx_rwunlock(&erts_driver_list_lock)
#define assert_drv_list_locked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
- || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ || erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_rwlocked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
#define assert_drv_list_rlocked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_not_locked() \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
- && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+ ERTS_LC_ASSERT(!erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ && !erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING))
@@ -134,13 +127,13 @@ kill_ports_driver_unloaded(DE_Handle *dh)
if (!prt)
continue;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
state = erts_atomic32_read_nob(&prt->state);
if (state & FREE_PORT_FLAGS)
continue;
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh)
@@ -280,10 +273,8 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
path[path_len++] = '/';
sys_strcpy(path+path_len,name);
-#if DDLL_SMP
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) != NULL) {
if (drv->handle == NULL) {
/* static_driver */
@@ -404,24 +395,18 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
erts_ddll_reference_driver(dh);
ASSERT(dh->status == ERL_DE_RELOAD);
dh->status = ERL_DE_FORCE_RELOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
/* Dereference, eventually causing driver destruction */
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
BIF_P->flags |= F_USING_DDLL;
if (monitor) {
@@ -432,18 +417,14 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (do_build_load_error) {
soft_error_term = build_load_error(BIF_P, build_this_load_error);
}
@@ -452,11 +433,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
t = TUPLE2(hp, am_error, soft_error_term);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
error:
assert_drv_list_not_locked();
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
if (path != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
}
@@ -518,7 +499,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
Eterm l;
int kill_ports = 0;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
for(l = options; is_list(l); l = CDR(list_val(l))) {
Eterm opt = CAR(list_val(l));
@@ -551,9 +532,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
soft_error_term = am_not_loaded;
@@ -597,7 +576,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
dh->reload_full_path = dh->reload_driver_name = NULL;
dh->reload_flags = 0;
}
- if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) {
+ if (erts_atomic32_read_nob(&dh->port_count) > 0) {
++kill_ports;
}
dh->status = ERL_DE_UNLOAD;
@@ -608,23 +587,17 @@ done:
/* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
BIF_P->flags |= F_USING_DDLL;
if (monitor > 0) {
@@ -638,17 +611,13 @@ done:
if (kill_ports > 1) {
ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
BIF_RET(t);
@@ -658,7 +627,7 @@ soft_error:
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
@@ -697,9 +666,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
int need = 3;
Eterm res = NIL;
erts_driver_t *drv;
-#if DDLL_SMP
lock_drv_list();
-#endif
for (drv = driver_list; drv; drv = drv->next) {
need += sys_strlen(drv->name)*2+2;
}
@@ -712,9 +679,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
}
res = TUPLE2(hp,am_ok,res);
/* hp += 3 */
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(res);
}
@@ -736,9 +701,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
Eterm *hp;
int i;
Uint filter;
-#if DDLL_SMP
int have_lock = 0;
-#endif
if ((name = pick_list_or_atom(name_term)) == NULL) {
goto error;
@@ -748,10 +711,8 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
have_lock = 1;
-#endif
if ((drv = lookup_driver(name)) == NULL) {
goto error;
}
@@ -781,7 +742,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
} else if (drv->handle->status == ERL_DE_PERMANENT) {
res = am_permanent;
} else {
- res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count));
+ res = make_small(erts_atomic32_read_nob(&drv->handle->port_count));
}
goto done;
case am_linked_in_driver:
@@ -827,9 +788,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
hp += 2;
}
done:
-#if DDLL_SMP
unlock_drv_list();
-#endif
if (pei)
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
@@ -838,11 +797,9 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
-#if DDLL_SMP
if (have_lock) {
unlock_drv_list();
}
-#endif
BIF_ERROR(p,BADARG);
}
@@ -899,13 +856,9 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
if (errdesc_to_code(code_term,&errint) != 0) {
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
errstring = erts_ddll_error(errint);
-#if DDLL_SMP
unlock_drv_list();
-#endif
break;
}
if (errstring == NULL) {
@@ -968,7 +921,7 @@ Eterm erts_ddll_monitor_driver(Process *p,
void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
{
erts_driver_t *drv;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
lock_drv_list();
drv = driver_list;
while (drv != NULL) {
@@ -993,7 +946,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
}
done:
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
}
/*
@@ -1002,7 +955,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
{
erts_driver_t *drv;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
lock_drv_list();
drv = driver_list;
while (drv != NULL) {
@@ -1040,18 +993,14 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
dh->status = ERL_DE_UNLOAD;
}
if (!left
- && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) {
+ && erts_atomic32_read_nob(&drv->handle->port_count) > 0) {
if (kill_ports) {
DE_Handle *dh = drv->handle;
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list(); /* Needed for future list operations */
-#endif
drv = drv->next; /* before allowing destruction */
erts_ddll_dereference_driver(dh);
} else {
@@ -1065,7 +1014,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
}
}
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
}
void erts_ddll_lock_driver(DE_Handle *dh, char *name)
{
@@ -1093,16 +1042,16 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name)
void erts_ddll_increment_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- erts_smp_atomic32_inc_nob(&dh->port_count);
+ erts_atomic32_inc_nob(&dh->port_count);
}
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
-#if DEBUG
- ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
+#ifdef DEBUG
+ ASSERT(erts_atomic32_dec_read_nob(&dh->port_count) >= 0);
#else
- erts_smp_atomic32_dec_nob(&dh->port_count);
+ erts_atomic32_dec_nob(&dh->port_count);
#endif
}
@@ -1281,10 +1230,8 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
Eterm immediate_type = NIL;
erts_driver_t *drv;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1314,20 +1261,14 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
}
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
- erts_smp_proc_unlock(p, plocks);
-#endif
+ erts_proc_unlock(p, plocks);
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
-#endif
+ erts_proc_lock(p, plocks);
BIF_RET(r);
}
@@ -1338,10 +1279,8 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
Eterm immediate_type = NIL;
erts_driver_t *drv;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1355,20 +1294,14 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, flag);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
- erts_smp_proc_unlock(p, plocks);
-#endif
+ erts_proc_unlock(p, plocks);
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, plocks);
-#endif
+ erts_proc_lock(p, plocks);
BIF_RET(r);
}
@@ -1476,8 +1409,10 @@ static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
static Eterm copy_ref(Eterm ref, Eterm *hp)
{
- RefThing *ptr = ref_thing_ptr(ref);
- memcpy(hp, ptr, sizeof(RefThing));
+ ErtsORefThing *ptr;
+ ASSERT(is_internal_ordinary_ref(ref));
+ ptr = ordinary_ref_thing_ptr(ref);
+ sys_memcpy(hp, ptr, sizeof(ErtsORefThing));
return (make_internal_ref(hp));
}
@@ -1519,9 +1454,9 @@ static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char
dh->procs = p;
dh->status = ERL_DE_RELOAD;
dh->reload_full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
- strcpy(dh->reload_full_path,path);
+ sys_strcpy(dh->reload_full_path,path);
dh->reload_driver_name = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(name) + 1);
- strcpy(dh->reload_driver_name,name);
+ sys_strcpy(dh->reload_driver_name,name);
dh->reload_flags = flags;
}
@@ -1566,18 +1501,19 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
goto error;
}
- if (strcmp(name, dp->driver_name) != 0) {
+ if (sys_strcmp(name, dp->driver_name) != 0) {
res = ERL_DE_LOAD_ERROR_BAD_NAME;
goto error;
}
- erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
- erts_smp_atomic32_init_nob(&dh->port_count, 0);
+
+ erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
+ erts_atomic32_init_nob(&dh->port_count, 0);
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
dh->flags = 0;
dh->status = ERL_DE_OK;
- if (erts_add_driver_entry(dp, dh, 1) != 0 /* io.c */) {
+ if (erts_add_driver_entry(dp, dh, 1, 1) != 0 /* io.c */) {
/*
* The init in the driver struct did not return 0
*/
@@ -1642,7 +1578,7 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
dh->handle = NULL;
dh->procs = NULL;
- erts_smp_atomic32_init_nob(&dh->port_count, 0);
+ erts_atomic32_init_nob(&dh->port_count, 0);
erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
@@ -1712,7 +1648,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
ErtsMessage *mp;
ErtsProcLocks rp_locks = 0;
ErlOffHeap *ohp;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
assert_drv_list_rwlocked();
if (errcode != 0) {
@@ -1720,10 +1656,10 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
Eterm e;
mp = erts_alloc_message_heap(proc, &rp_locks,
(6 /* tuple */ + 3 /* Error tuple */ +
- REF_THING_SIZE + need),
+ ERTS_REF_THING_SIZE + need),
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
e = build_load_error_hp(hp, errcode);
hp += need;
mess = TUPLE2(hp,tag,e);
@@ -1731,15 +1667,15 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
mess = TUPLE5(hp,type,r,am_driver,driver_name,mess);
} else {
mp = erts_alloc_message_heap(proc, &rp_locks,
- 6 /* tuple */ + REF_THING_SIZE,
+ 6 /* tuple */ + ERTS_REF_THING_SIZE,
&hp, &ohp);
r = copy_ref(ref,hp);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
erts_queue_message(proc, rp_locks, mp, mess, am_system);
- erts_smp_proc_unlock(proc, rp_locks);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ erts_proc_unlock(proc, rp_locks);
+ ERTS_CHK_NO_PROC_LOCKS;
}
static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag)
@@ -1811,7 +1747,7 @@ static Eterm build_load_error(Process *p, int code)
{
int need = load_error_need(code);
Eterm *hp = NULL;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
if (need) {
hp = HAlloc(p,need);
}
@@ -1864,7 +1800,7 @@ static char *pick_list_or_atom(Eterm name_term)
goto error;
}
name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, ap->len + 1);
- memcpy(name,ap->name,ap->len);
+ sys_memcpy(name,ap->name,ap->len);
name[ap->len] = '\0';
} else {
if (erts_iolist_size(name_term, &name_len)) {
@@ -1935,7 +1871,7 @@ static erts_driver_t *lookup_driver(char *name)
{
erts_driver_t *drv;
assert_drv_list_locked();
- for (drv = driver_list; drv != NULL && strcmp(drv->name, name); drv = drv->next)
+ for (drv = driver_list; drv != NULL && sys_strcmp(drv->name, name); drv = drv->next)
;
return drv;
}
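The unload/reload paths above repeat one idiom worth spelling out: take a reference on the driver handle while the driver list lock is held, release the lock to do work that must run unlocked (such as kill_ports_driver_unloaded()), then relock and drop the reference, so the handle cannot be destroyed mid-operation. Below is a standalone pthreads sketch of that idiom; handle, list_lock and slow_work are invented names standing in for DE_Handle, erts_driver_list_lock and the port-killing code.

#include <pthread.h>

typedef struct {
    int refc;                       /* protected by list_lock */
    /* ... driver handle fields ... */
} handle;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_work(handle *h)    /* e.g. killing off ports */
{
    (void) h;
}

static void use_handle(handle *h)
{
    pthread_mutex_lock(&list_lock);
    h->refc++;                      /* keeps h alive across the unlock */
    pthread_mutex_unlock(&list_lock);

    slow_work(h);                   /* must not hold list_lock here */

    pthread_mutex_lock(&list_lock);
    if (--h->refc == 0) {
        /* last reference gone: destruction would be triggered here */
    }
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    handle h = { 0 };
    use_handle(&h);
    return 0;
}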
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index b42d2dc28b..8a5c6ada6c 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -141,6 +141,39 @@ BIF_RETTYPE trunc_1(BIF_ALIST_1)
BIF_RET(res);
}
+BIF_RETTYPE floor_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ GET_DOUBLE(BIF_ARG_1, f);
+ res = double_to_integer(BIF_P, floor(f.fd));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE ceil_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ res = double_to_integer(BIF_P, ceil(f.fd));
+ BIF_RET(res);
+}
+
BIF_RETTYPE round_1(BIF_ALIST_1)
{
Eterm res;
@@ -157,7 +190,7 @@ BIF_RETTYPE round_1(BIF_ALIST_1)
GET_DOUBLE(BIF_ARG_1, f);
/* round it and return the resultant integer */
- res = double_to_integer(BIF_P, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5);
+ res = double_to_integer(BIF_P, round(f.fd));
BIF_RET(res);
}
@@ -597,8 +630,7 @@ Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live)
}
GET_DOUBLE(arg, f);
- return gc_double_to_integer(p, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5,
- reg, live);
+ return gc_double_to_integer(p, round(f.fd), reg, live);
}
Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
@@ -621,6 +653,38 @@ Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
reg, live);
}
+Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, floor(f.fd), reg, live);
+}
+
+Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+ return gc_double_to_integer(p, ceil(f.fd), reg, live);
+}
+
static Eterm
gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
{
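The round/1 change above (and the matching erts_gc_round_1 change) replaces the old "f.fd + 0.5" trick with the C library's round(). The old form is subtly wrong: the addition itself can round upward in binary floating point, so an argument just below 0.5 rounded to 1 instead of 0. A small standalone demonstration (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 0.49999999999999994; /* largest double below 0.5 */
    printf("x + 0.5  -> %.17g\n", x + 0.5);   /* 1 (wrong: rounds up) */
    printf("round(x) -> %.17g\n", round(x));  /* 0 (correct) */
    return 0;
}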
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 29ba12dfdb..7fada0d548 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@
#include "erl_message.h"
#include "erl_binary.h"
#include "erl_db.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "dist.h"
#include "erl_gc.h"
#include "erl_cpu_topology.h"
@@ -46,8 +46,12 @@
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
#include "erl_map.h"
+#include "erl_check_io.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
+#include "erl_time.h"
+#include "erl_proc_sig_queue.h"
+#include "erl_alloc_util.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -70,6 +74,9 @@ static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
static Export *gather_system_check_res_trap;
+static Export *is_process_alive_trap;
+
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
static char otp_version[] = ERLANG_OTP_VERSION;
@@ -87,21 +94,15 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ARCH_64
" [64-bit]"
#endif
-#ifdef ERTS_SMP
" [smp:%beu:%beu]"
-#endif
-#ifdef USE_THREADS
-#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
+#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
+ " [dirty-schedulers-TEST]"
#endif
" [async-threads:%d]"
-#endif
#ifdef HIPE
" [hipe]"
#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- " [kernel-poll:%s]"
-#endif
#ifdef ET_DEBUG
#if ET_DEBUG
" [type-assertions]"
@@ -155,8 +156,10 @@ static Eterm os_type_tuple;
static Eterm os_version_tuple;
static Eterm
-current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
-static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
+current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags);
+static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size);
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
@@ -174,7 +177,33 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
+ Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ }
+ return res;
+}
+
+static Eterm
+bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+{
+ struct erl_off_heap_header* ohh;
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (is_ref_thing_header((*((Eterm *) ohh)))) {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
+ Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -189,19 +218,27 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
make_monitor_list:
returns a list of records:
-record(erl_monitor, {
- type, % MON_ORIGIN or MON_TARGET (1 or 3)
- ref,
+ type, % process | port | time_offset | dist_process | resource
+ % | node | nodes | suspend
+ dir, % origin | target
+ ref, % reference or []
pid, % Process or nodename
- name % registered name or []
+ extra % registered name, integer or []
}).
*/
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Uint *psz = vpsz;
- *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
- *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
- *psz += 8; /* CONS + 5-tuple */
+ *psz += is_immed(mdp->ref) ? 0 : NC_HEAP_SIZE(mdp->ref);
+
+ if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
+ *psz += erts_resource_ref_size(mon->other.ptr);
+ else
+ *psz += is_immed(mon->other.item) ? 0 : NC_HEAP_SIZE(mon->other.item);
+
+ *psz += 9; /* CONS + 6-tuple */
}
typedef struct {
@@ -213,36 +250,97 @@ typedef struct {
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
MonListContext *pmlc = vpmlc;
- Eterm tup;
- Eterm r = (IS_CONST(mon->ref)
- ? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
- Eterm p = (IS_CONST(mon->pid)
- ? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
- tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
- pmlc->hp += 6;
+ Eterm tup, t, d, r, p, x;
+
+ r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
+ if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
+ p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
+ else
+ p = (is_immed(mon->other.item)
+ ? mon->other.item
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ x = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else if (erts_monitor_is_target(mon))
+ x = NIL;
+ else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
+ x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
+ else
+ x = NIL;
+
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ t = am_process;
+ break;
+ case ERTS_MON_TYPE_PORT:
+ t = am_port;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ t = am_time_offset;
+ break;
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ERTS_DECL_AM(dist_process);
+ t = AM_dist_process;
+ break;
+ }
+ case ERTS_MON_TYPE_RESOURCE: {
+ ERTS_DECL_AM(resource);
+ t = AM_resource;
+ break;
+ }
+ case ERTS_MON_TYPE_NODE:
+ t = am_node;
+ break;
+ case ERTS_MON_TYPE_NODES: {
+ ERTS_DECL_AM(nodes);
+ t = AM_nodes;
+ break;
+ }
+ case ERTS_MON_TYPE_SUSPEND:
+ t = am_suspend;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unknown monitor type");
+ t = am_error;
+ break;
+ }
+ if (erts_monitor_is_target(mon)) {
+ ERTS_DECL_AM(target);
+ d = AM_target;
+ }
+ else {
+ ERTS_DECL_AM(origin);
+ d = AM_origin;
+ }
+ tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
+ pmlc->hp += 7;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
pmlc->hp += 2;
}
static Eterm
-make_monitor_list(Process *p, ErtsMonitor *root)
+make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
{
DECL_AM(erl_monitor);
Uint sz = 0;
MonListContext mlc;
+ void (*foreach)(ErtsMonitor *,
+ void (*)(ErtsMonitor *, void *),
+ void *);
- erts_doforall_monitors(root, &do_calc_mon_size, &sz);
- if (sz == 0) {
- return NIL;
- }
+ foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
+
+ (*foreach)(root, do_calc_mon_size, &sz);
+ if (sz == 0)
+ return tail;
mlc.p = p;
mlc.hp = HAlloc(p,sz);
- mlc.res = NIL;
+ mlc.res = tail;
mlc.tag = AM_erl_monitor;
- erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
+ (*foreach)(root, do_make_one_mon_element, &mlc);
return mlc.res;
}
@@ -250,20 +348,22 @@ make_monitor_list(Process *p, ErtsMonitor *root)
make_link_list:
returns a list of records:
-record(erl_link, {
- type, % LINK_NODE or LINK_PID (1 or 3)
- pid, % Process or nodename
- targets % List of erl_link's or nil
+ type, % process | port | dist_process
+ pid, % Process or port
+ id % (address)
}).
*/
-static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
+static void calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
- if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
- /* Node links use this pointer as ref counter... */
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
- }
+ Uint sz = 0;
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+
+ (void) erts_bld_uword(NULL, &sz, (UWord) ldp);
+
+ *psz += sz;
+ *psz += is_immed(lnk->other.item) ? 0 : size_object(lnk->other.item);
*psz += 7; /* CONS + 4-tuple */
}
@@ -274,37 +374,58 @@ typedef struct {
Eterm tag;
} LnkListContext;
-static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
+static void make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
LnkListContext *pllc = vpllc;
- Eterm tup;
- Eterm old_res, targets = NIL;
- Eterm p = (IS_CONST(lnk->pid)
- ? lnk->pid
- : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
- if (lnk->type == LINK_NODE) {
- targets = make_small(ERTS_LINK_REFC(lnk));
- } else if (ERTS_LINK_ROOT(lnk) != NULL) {
- old_res = pllc->res;
- pllc->res = NIL;
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
- targets = pllc->res;
- pllc->res = old_res;
- }
- tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
+ Eterm tup, t, pid, id;
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+
+ id = erts_bld_uword(&pllc->hp, NULL, (UWord) ldp);
+
+ if (is_immed(lnk->other.item))
+ pid = lnk->other.item;
+ else {
+ Uint sz = size_object(lnk->other.item);
+ pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
+ }
+
+ switch (lnk->type) {
+ case ERTS_LNK_TYPE_PROC:
+ t = am_process;
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ t = am_port;
+ break;
+ case ERTS_LNK_TYPE_DIST_PROC: {
+ ERTS_DECL_AM(dist_process);
+ t = AM_dist_process;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Unkown link type");
+ t = am_undefined;
+ break;
+ }
+
+ tup = TUPLE4(pllc->hp, pllc->tag, t, pid, id);
pllc->hp += 5;
pllc->res = CONS(pllc->hp, tup, pllc->res);
pllc->hp += 2;
}
static Eterm
-make_link_list(Process *p, ErtsLink *root, Eterm tail)
+make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
{
DECL_AM(erl_link);
Uint sz = 0;
LnkListContext llc;
+ void (*foreach)(ErtsLink *,
+ void (*)(ErtsLink *, void *),
+ void *);
+
+ foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
- erts_doforall_links(root, &do_calc_lnk_size, &sz);
+ (*foreach)(root, calc_lnk_size, (void *) &sz);
if (sz == 0) {
return tail;
}
@@ -312,25 +433,23 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail)
llc.hp = HAlloc(p,sz);
llc.res = tail;
llc.tag = AM_erl_link;
- erts_doforall_links(root, &do_make_one_lnk_element, &llc);
+ (*foreach)(root, make_one_lnk_element, (void *) &llc);
return llc.res;
}
int
-erts_print_system_version(int to, void *arg, Process *c_p)
+erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
int i, rc = -1;
char *rc_str = "";
char rc_buf[100];
char *ov = otp_version;
-#ifdef ERTS_SMP
Uint total, online, active;
Uint dirty_cpu, dirty_cpu_onln, dirty_io;
erts_schedulers_state(&total, &online, &active,
&dirty_cpu, &dirty_cpu_onln, NULL,
&dirty_io, NULL);
-#endif
for (i = 0; i < sizeof(otp_version)-4; i++) {
if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
rc = atoi(&ov[i+3]);
@@ -345,26 +464,23 @@ erts_print_system_version(int to, void *arg, Process *c_p)
}
return erts_print(to, arg, erts_system_version,
rc_str
-#ifdef ERTS_SMP
, total, online
-#ifdef ERTS_DIRTY_SCHEDULERS
, dirty_cpu, dirty_cpu_onln, dirty_io
-#endif
-#endif
-#ifdef USE_THREADS
, erts_async_max_threads
-#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- , erts_use_kernel_poll ? "true" : "false"
-#endif
);
}
typedef struct {
/* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
* {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
- * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name */
- Eterm entity;
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
+ * {Entity,Node} = {monitor.resource,MON_NIF_TARGET} */
+ union {
+ Eterm term;
+ ErtsResource* resource;
+ } entity;
+ int named;
+ Uint16 type;
Eterm node;
/* pid is actual target being monitored, no matter pid/port or name */
Eterm pid;
@@ -407,77 +523,107 @@ static void collect_one_link(ErtsLink *lnk, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
EXTEND_MONITOR_INFOS(micp);
- if (!(lnk->type == LINK_PID)) {
- return;
- }
- micp->mi[micp->mi_i].entity = lnk->pid;
- micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
+ micp->mi[micp->mi_i].entity.term = lnk->other.item;
+ micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
micp->mi_i++;
}
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
- MonitorInfoCollection *micp = vmicp;
+ if (erts_monitor_is_origin(mon)) {
+ MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_ORIGIN) {
- return;
- }
- EXTEND_MONITOR_INFOS(micp);
- if (is_atom(mon->pid)) { /* external by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
- micp->sz += 3; /* need one 2-tuple */
- } else if (is_external_pid(mon->pid)) { /* external by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
- } else if (!is_nil(mon->name)) { /* internal by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
- micp->sz += 3; /* need one 2-tuple */
- } else { /* internal by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- /* no additional heap space needed */
- }
-
- /* have always pid at hand, to assist with figuring out if its a port or
- * a process, when we monitored by name and process_info is requested.
- * See: erl_bif_info.c:process_info_aux section for am_monitors */
- micp->mi[micp->mi_i].pid = mon->pid;
+ EXTEND_MONITOR_INFOS(micp);
+
+ micp->mi[micp->mi_i].type = mon->type;
+
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ if (!(mon->flags & ERTS_ML_FLG_NAME)) {
+ micp->mi[micp->mi_i].named = 0;
+ micp->mi[micp->mi_i].entity.term = mon->other.item;
+ micp->mi[micp->mi_i].node = NIL;
+ if (is_not_atom(mon->other.item))
+ micp->sz += NC_HEAP_SIZE(mon->other.item);
+ }
+ else {
+ ErtsMonitorDataExtended *mdep;
+ micp->mi[micp->mi_i].named = !0;
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ micp->mi[micp->mi_i].entity.term = mdep->u.name;
+ if (mdep->dist)
+ micp->mi[micp->mi_i].node = mdep->dist->nodename;
+ else
+ micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
+ micp->sz += 3; /* need one 2-tuple */
+ }
- micp->mi_i++;
- micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
+ /* always have the pid at hand, to assist with figuring out if it's a
+ * port or a process when we monitored by name and process_info is
+ * requested. See: erl_bif_info.c:process_info_aux section for am_monitors */
+ micp->mi[micp->mi_i].pid = mon->other.item;
+
+ micp->mi_i++;
+ micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
+ break;
+ default:
+ break;
+ }
+ }
}
static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_TARGET) {
- return;
- }
+ if (erts_monitor_is_target(mon)) {
- EXTEND_MONITOR_INFOS(micp);
+ EXTEND_MONITOR_INFOS(micp);
- micp->mi[micp->mi_i].node = NIL;
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
- micp->mi_i++;
+ micp->mi[micp->mi_i].type = mon->type;
+ micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
+ switch (mon->type) {
+
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_DIST_PROC:
+
+ micp->mi[micp->mi_i].entity.term = mon->other.item;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->other.item);
+
+ micp->sz += 2; /* cons */
+ micp->mi_i++;
+ break;
+
+ case ERTS_MON_TYPE_RESOURCE:
+
+ micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += erts_resource_ref_size(mon->other.ptr);
+
+ micp->sz += 2; /* cons */
+ micp->mi_i++;
+ break;
+
+ default:
+ break;
+ }
+
+ }
}
typedef struct {
- Process *c_p;
- ErtsProcLocks c_p_locks;
- ErtsSuspendMonitor **smi;
+ ErtsMonitorSuspend **smi;
Uint smi_i;
Uint smi_max;
- int sz;
+ Uint sz;
} ErtsSuspendMonitorInfoCollection;
-#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
- (SMIC).c_p = (CP); \
- (SMIC).c_p_locks = (CPL); \
+#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
(SMIC).smi = NULL; \
(SMIC).smi_i = (SMIC).smi_max = 0; \
(SMIC).sz = 0; \
@@ -492,10 +638,10 @@ do { \
(SMICP)->smi, \
((SMICP)->smi_max \
+ ERTS_SMI_INC) \
- * sizeof(ErtsSuspendMonitor *)) \
+ * sizeof(ErtsMonitorSuspend *)) \
: erts_alloc(ERTS_ALC_T_TMP, \
ERTS_SMI_INC \
- * sizeof(ErtsSuspendMonitor *))); \
+ * sizeof(ErtsMonitorSuspend *))); \
(SMICP)->smi_max += ERTS_SMI_INC; \
} \
} while (0)
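
ERTS_EXTEND_SUSPEND_MONITOR_INFOS grows its buffer by a fixed increment, allocating on first use and reallocating afterwards. The same strategy as a plain function, with illustrative types and increment:

    #include <stdlib.h>

    #define SMI_INC 16  /* illustrative; cf. ERTS_SMI_INC above */

    typedef struct { void **smi; size_t smi_i, smi_max; } Collection;

    static void collection_extend(Collection *c)
    {
        if (c->smi_i >= c->smi_max) {
            size_t new_max = c->smi_max + SMI_INC;
            /* realloc(NULL, n) behaves like malloc(n), mirroring the
             * alloc-vs-realloc branch in the macro above */
            void **p = realloc(c->smi, new_max * sizeof(void *));
            if (!p)
                abort();    /* erts_realloc() also aborts on out-of-memory */
            c->smi = p;
            c->smi_max = new_max;
        }
    }
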
@@ -508,35 +654,28 @@ do { \
} while (0)
static void
-collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
+collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
{
- ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
- Process *suspendee = erts_pid2proc(smicp->c_p,
- smicp->c_p_locks,
- smon->pid,
- 0);
- if (suspendee) { /* suspendee is alive */
- Sint a, p;
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ Sint count;
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsSuspendMonitorInfoCollection *smicp;
- ASSERT((smon->active && !smon->pending)
- || (smon->pending && !smon->active));
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ smicp = vsmicp;
ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
- smicp->smi[smicp->smi_i] = smon;
+ smicp->smi[smicp->smi_i] = msp;
smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
- a = (Sint) smon->active; /* quiet compiler warnings */
- p = (Sint) smon->pending; /* on 64-bit machines */
+ mstate = erts_atomic_read_nob(&msp->state);
- if (!IS_SSMALL(a))
- smicp->sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
+ count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(count))
smicp->sz += BIG_UINT_HEAP_SIZE;
+
smicp->smi_i++;
}
}
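
collect_one_suspend_monitor() recovers the suspend counter by masking an atomic state word that packs the count together with flag bits. A self-contained illustration with a made-up bit layout (the real layout is given by the ERTS_MSUSPEND_STATE_* definitions):

    #include <assert.h>
    #include <stdint.h>

    #define STATE_COUNTER_MASK UINT64_C(0x7fffffffffffffff) /* low bits: count */
    #define STATE_FLG_ACTIVE   (UINT64_C(1) << 63)          /* high bit: flag */

    int main(void)
    {
        uint64_t mstate = STATE_FLG_ACTIVE | 42;  /* active, count == 42 */
        int64_t count = (int64_t) (mstate & STATE_COUNTER_MASK);

        assert(count == 42);
        assert(mstate & STATE_FLG_ACTIVE);
        return 0;
    }
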
@@ -545,123 +684,222 @@ collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
* process_info/[1,2]
*/
-#define ERTS_PI_FAIL_TYPE_BADARG 0
-#define ERTS_PI_FAIL_TYPE_YIELD 1
-#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2
-
-static ERTS_INLINE ErtsProcLocks
-pi_locks(Eterm info)
-{
- switch (info) {
- case am_status:
- case am_priority:
- case am_trap_exit:
- return ERTS_PROC_LOCK_STATUS;
- case am_links:
- case am_monitors:
- case am_monitored_by:
- case am_suspending:
- return ERTS_PROC_LOCK_LINK;
- case am_messages:
- case am_message_queue_len:
- case am_total_heap_size:
- return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
- case am_memory:
- return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
- default:
- return ERTS_PROC_LOCK_MAIN;
- }
-}
-
/*
* All valid process_info arguments.
*/
-static Eterm pi_args[] = {
- am_registered_name,
- am_current_function,
- am_initial_call,
- am_status,
- am_messages,
- am_message_queue_len,
- am_links,
- am_monitors,
- am_monitored_by,
- am_dictionary,
- am_trap_exit,
- am_error_handler,
- am_heap_size,
- am_stack_size,
- am_memory,
- am_garbage_collection,
- am_group_leader,
- am_reductions,
- am_priority,
- am_trace,
- am_binary,
- am_sequential_trace_token,
- am_catchlevel,
- am_backtrace,
- am_last_calls,
- am_total_heap_size,
- am_suspending,
- am_min_heap_size,
- am_min_bin_vheap_size,
- am_max_heap_size,
- am_current_location,
- am_current_stacktrace,
- am_message_queue_data,
- am_garbage_collection_info
+
+#define ERTS_PI_IX_REGISTERED_NAME 0
+#define ERTS_PI_IX_CURRENT_FUNCTION 1
+#define ERTS_PI_IX_INITIAL_CALL 2
+#define ERTS_PI_IX_STATUS 3
+#define ERTS_PI_IX_MESSAGES 4
+#define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
+#define ERTS_PI_IX_LINKS 6
+#define ERTS_PI_IX_MONITORS 7
+#define ERTS_PI_IX_MONITORED_BY 8
+#define ERTS_PI_IX_DICTIONARY 9
+#define ERTS_PI_IX_TRAP_EXIT 10
+#define ERTS_PI_IX_ERROR_HANDLER 11
+#define ERTS_PI_IX_HEAP_SIZE 12
+#define ERTS_PI_IX_STACK_SIZE 13
+#define ERTS_PI_IX_MEMORY 14
+#define ERTS_PI_IX_GARBAGE_COLLECTION 15
+#define ERTS_PI_IX_GROUP_LEADER 16
+#define ERTS_PI_IX_REDUCTIONS 17
+#define ERTS_PI_IX_PRIORITY 18
+#define ERTS_PI_IX_TRACE 19
+#define ERTS_PI_IX_BINARY 20
+#define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
+#define ERTS_PI_IX_CATCHLEVEL 22
+#define ERTS_PI_IX_BACKTRACE 23
+#define ERTS_PI_IX_LAST_CALLS 24
+#define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
+#define ERTS_PI_IX_SUSPENDING 26
+#define ERTS_PI_IX_MIN_HEAP_SIZE 27
+#define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
+#define ERTS_PI_IX_MAX_HEAP_SIZE 29
+#define ERTS_PI_IX_CURRENT_LOCATION 30
+#define ERTS_PI_IX_CURRENT_STACKTRACE 31
+#define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
+#define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
+#define ERTS_PI_IX_MAGIC_REF 34
+#define ERTS_PI_IX_FULLSWEEP_AFTER 35
+
+#define ERTS_PI_FLAG_SINGELTON (1 << 0)
+#define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
+#define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
+#define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
+#define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
+#define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)
+
+#define ERTS_PI_UNRESERVE(RS, SZ) \
+ (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
+
+
+typedef struct {
+ Eterm name;
+ Uint reserve_size;
+ int flags;
+ ErtsProcLocks locks;
+} ErtsProcessInfoArgs;
+
+static ErtsProcessInfoArgs pi_args[] = {
+ {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
+ {am_status, 0, 0, 0},
+ {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
+ {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
+ {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_priority, 0, 0, 0},
+ {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
+ {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
+ {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};
-#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
+#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))
+
+#ifdef DEBUG
+# define ERTS_PI_DEF_ARR_SZ 2
+#else
+# define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
+#endif
static ERTS_INLINE Eterm
pi_ix2arg(int ix)
{
if (ix < 0 || ERTS_PI_ARGS <= ix)
return am_undefined;
- return pi_args[ix];
+ return pi_args[ix].name;
+}
+
+static ERTS_INLINE int
+pi_ix2flags(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].flags;
+}
+
+static ERTS_INLINE Uint
+pi_ix2rsz(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].reserve_size;
+}
+
+static ERTS_INLINE ErtsProcLocks
+pi_ix2locks(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].locks;
}
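
With the ErtsProcessInfoArgs table above, each per-item property (name, reserve size, flags, locks) is a bounds-checked array lookup rather than one switch per property. A compact stand-alone version of the idiom; the table rows here are dummies:

    typedef struct {
        const char *name;
        unsigned reserve_size;
        int flags;
    } InfoArg;

    static const InfoArg args[] = {
        { "registered_name", 0, 0 },
        { "initial_call",    4, 0 },   /* e.g. room for a 3-tuple */
    };
    #define N_ARGS ((int) (sizeof(args)/sizeof(args[0])))

    static unsigned ix2rsz(int ix)
    {
        if (ix < 0 || N_ARGS <= ix)
            return 0;                  /* out-of-range index: safe default */
        return args[ix].reserve_size;
    }
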
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
switch (arg) {
- case am_registered_name: return 0;
- case am_current_function: return 1;
- case am_initial_call: return 2;
- case am_status: return 3;
- case am_messages: return 4;
- case am_message_queue_len: return 5;
- case am_links: return 6;
- case am_monitors: return 7;
- case am_monitored_by: return 8;
- case am_dictionary: return 9;
- case am_trap_exit: return 10;
- case am_error_handler: return 11;
- case am_heap_size: return 12;
- case am_stack_size: return 13;
- case am_memory: return 14;
- case am_garbage_collection: return 15;
- case am_group_leader: return 16;
- case am_reductions: return 17;
- case am_priority: return 18;
- case am_trace: return 19;
- case am_binary: return 20;
- case am_sequential_trace_token: return 21;
- case am_catchlevel: return 22;
- case am_backtrace: return 23;
- case am_last_calls: return 24;
- case am_total_heap_size: return 25;
- case am_suspending: return 26;
- case am_min_heap_size: return 27;
- case am_min_bin_vheap_size: return 28;
- case am_max_heap_size: return 29;
- case am_current_location: return 30;
- case am_current_stacktrace: return 31;
- case am_message_queue_data: return 32;
- case am_garbage_collection_info: return 33;
- default: return -1;
+ case am_registered_name:
+ return ERTS_PI_IX_REGISTERED_NAME;
+ case am_current_function:
+ return ERTS_PI_IX_CURRENT_FUNCTION;
+ case am_initial_call:
+ return ERTS_PI_IX_INITIAL_CALL;
+ case am_status:
+ return ERTS_PI_IX_STATUS;
+ case am_messages:
+ return ERTS_PI_IX_MESSAGES;
+ case am_message_queue_len:
+ return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
+ case am_links:
+ return ERTS_PI_IX_LINKS;
+ case am_monitors:
+ return ERTS_PI_IX_MONITORS;
+ case am_monitored_by:
+ return ERTS_PI_IX_MONITORED_BY;
+ case am_dictionary:
+ return ERTS_PI_IX_DICTIONARY;
+ case am_trap_exit:
+ return ERTS_PI_IX_TRAP_EXIT;
+ case am_error_handler:
+ return ERTS_PI_IX_ERROR_HANDLER;
+ case am_heap_size:
+ return ERTS_PI_IX_HEAP_SIZE;
+ case am_stack_size:
+ return ERTS_PI_IX_STACK_SIZE;
+ case am_memory:
+ return ERTS_PI_IX_MEMORY;
+ case am_garbage_collection:
+ return ERTS_PI_IX_GARBAGE_COLLECTION;
+ case am_group_leader:
+ return ERTS_PI_IX_GROUP_LEADER;
+ case am_reductions:
+ return ERTS_PI_IX_REDUCTIONS;
+ case am_priority:
+ return ERTS_PI_IX_PRIORITY;
+ case am_trace:
+ return ERTS_PI_IX_TRACE;
+ case am_binary:
+ return ERTS_PI_IX_BINARY;
+ case am_sequential_trace_token:
+ return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
+ case am_catchlevel:
+ return ERTS_PI_IX_CATCHLEVEL;
+ case am_backtrace:
+ return ERTS_PI_IX_BACKTRACE;
+ case am_last_calls:
+ return ERTS_PI_IX_LAST_CALLS;
+ case am_total_heap_size:
+ return ERTS_PI_IX_TOTAL_HEAP_SIZE;
+ case am_suspending:
+ return ERTS_PI_IX_SUSPENDING;
+ case am_min_heap_size:
+ return ERTS_PI_IX_MIN_HEAP_SIZE;
+ case am_min_bin_vheap_size:
+ return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
+ case am_max_heap_size:
+ return ERTS_PI_IX_MAX_HEAP_SIZE;
+ case am_current_location:
+ return ERTS_PI_IX_CURRENT_LOCATION;
+ case am_current_stacktrace:
+ return ERTS_PI_IX_CURRENT_STACKTRACE;
+ case am_message_queue_data:
+ return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
+ case am_garbage_collection_info:
+ return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
+ case am_magic_ref:
+ return ERTS_PI_IX_MAGIC_REF;
+ case am_fullsweep_after:
+ return ERTS_PI_IX_FULLSWEEP_AFTER;
+ default:
+ return -1;
}
}
@@ -671,7 +909,6 @@ static Eterm pi_1_keys[] = {
am_initial_call,
am_status,
am_message_queue_len,
- am_messages,
am_links,
am_dictionary,
am_trap_exit,
@@ -715,155 +952,41 @@ process_info_init(void)
}
-static ERTS_INLINE Process *
-pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
-{
-#ifdef ERTS_SMP
- /*
- * If the main lock is needed, we use erts_pid2proc_not_running()
- * instead of erts_pid2proc() for two reasons:
- * * Current function of pid and possibly other information will
- * have been updated so that process_info() is consistent with an
- * info-request/info-response signal model.
- * * We avoid blocking the whole scheduler executing the
- * process that is calling process_info() for a long time
- * which will happen if pid is currently running.
- * The caller of process_info() may have to yield if pid
- * is currently running.
- */
-
- if (info_locks & ERTS_PROC_LOCK_MAIN)
- return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
- pid, info_locks);
- else
-#endif
- return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- pid, info_locks);
-}
-
-
-
static BIF_RETTYPE
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap);
-
-#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
-#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds);
-static Eterm
-process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
- int *fail_type)
+Eterm
+erts_process_info(Process *c_p,
+ ErtsHeapFactory *hfact,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ int *item_ix,
+ int item_ix_len,
+ int flags,
+ Uint reserve_size,
+ Uint *reds)
{
- int want_messages = 0;
- int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
- int *res_elem_ix = &def_res_elem_ix_buf[0];
- int res_elem_ix_ix = -1;
- int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
+ Eterm res;
Eterm part_res[ERTS_PI_ARGS];
- Eterm res, arg;
- Uint *hp, *hp_end;
- ErtsProcLocks locks = (ErtsProcLocks) 0;
- int res_len, ix;
- Process *rp = NULL;
+ int item_ix_ix, ix;
- *fail_type = ERTS_PI_FAIL_TYPE_BADARG;
+ if (ERTS_PI_FLAG_SINGELTON & flags) {
+ ASSERT(item_ix_len == 1);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
+ flags, &reserve_size, reds);
+ return res;
+ }
for (ix = 0; ix < ERTS_PI_ARGS; ix++)
part_res[ix] = THE_NON_VALUE;
- ASSERT(is_list(list));
-
- while (is_list(list)) {
- Eterm* consp = list_val(list);
-
- arg = CAR(consp);
- ix = pi_arg2ix(arg);
- if (ix < 0) {
- res = THE_NON_VALUE;
- goto done;
- }
- if (arg == am_messages)
- want_messages = 1;
- locks |= pi_locks(arg);
- res_elem_ix_ix++;
- if (res_elem_ix_ix >= res_elem_ix_sz) {
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- res_elem_ix =
- erts_realloc(ERTS_ALC_T_TMP,
- res_elem_ix,
- sizeof(int)*(res_elem_ix_sz
- += ERTS_PI_RES_ELEM_IX_BUF_INC));
- else {
- int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
- int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(int)*new_res_elem_ix_sz);
- sys_memcpy((void *) new_res_elem_ix,
- (void *) res_elem_ix,
- sizeof(int)*res_elem_ix_sz);
- res_elem_ix = new_res_elem_ix;
- res_elem_ix_sz = new_res_elem_ix_sz;
- }
- }
- res_elem_ix[res_elem_ix_ix] = ix;
- list = CDR(consp);
- }
- if (is_not_nil(list)) {
- res = THE_NON_VALUE;
- goto done;
- }
-
- res_len = res_elem_ix_ix+1;
-
- ASSERT(res_len > 0);
-
- rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
- if (!rp) {
- res = am_undefined;
- goto done;
- }
- else if (rp == ERTS_PROC_LOCK_BUSY) {
- rp = NULL;
- res = THE_NON_VALUE;
- *fail_type = ERTS_PI_FAIL_TYPE_YIELD;
- goto done;
- }
- else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
- locks |= ERTS_PROC_LOCK_STATUS;
- res = THE_NON_VALUE;
- *fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
- goto done;
- }
- else {
- ErtsProcLocks unlock_locks = 0;
-
- if (c_p == rp)
- locks |= ERTS_PROC_LOCK_MAIN;
-
- if (!(locks & ERTS_PROC_LOCK_STATUS))
- unlock_locks |= ERTS_PROC_LOCK_STATUS;
-
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- /*
- * Move in queue into private queue and
- * release msgq lock, enabling others to
- * send messages to the process while it
- * is being inspected...
- */
- ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- locks &= ~ERTS_PROC_LOCK_MSGQ;
- unlock_locks |= ERTS_PROC_LOCK_MSGQ;
- }
-
- if (unlock_locks)
- erts_smp_proc_unlock(rp, unlock_locks);
-
- }
-
/*
* We always handle 'messages' first if it should be part
* of the result. This since if both 'messages' and
@@ -871,28 +994,31 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* change the result of 'message_queue_len' (in case
* the queue contain bad distribution messages).
*/
- if (want_messages) {
+ if (flags & ERTS_PI_FLAG_WANT_MSGS) {
ix = pi_arg2ix(am_messages);
ASSERT(part_res[ix] == THE_NON_VALUE);
- part_res[ix] = process_info_aux(c_p, rp, locks, pid, am_messages, always_wrap);
- ASSERT(part_res[ix] != THE_NON_VALUE);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
+ ASSERT(res != am_undefined);
+ ASSERT(res != THE_NON_VALUE);
+ part_res[ix] = res;
}
- for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
if (part_res[ix] == THE_NON_VALUE) {
- arg = pi_ix2arg(ix);
- part_res[ix] = process_info_aux(c_p, rp, locks, pid, arg, always_wrap);
- ASSERT(part_res[ix] != THE_NON_VALUE);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
+ ASSERT(res != am_undefined);
+ ASSERT(res != THE_NON_VALUE);
+ part_res[ix] = res;
}
}
- hp = HAlloc(c_p, res_len*2);
- hp_end = hp + res_len*2;
res = NIL;
- for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
ASSERT(part_res[ix] != THE_NON_VALUE);
/*
* If we should ignore the value of registered_name,
@@ -900,173 +1026,317 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* beginning of process_info_aux().
*/
if (is_nil(part_res[ix])) {
- ASSERT(!always_wrap);
+ ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
ASSERT(pi_ix2arg(ix) == am_registered_name);
}
else {
+ Eterm *hp;
+ ERTS_PI_UNRESERVE(reserve_size, 2);
+ hp = erts_produce_heap(hfact, 2, reserve_size);
res = CONS(hp, part_res[ix], res);
- hp += 2;
}
}
- if (!always_wrap) {
- HRelease(c_p, hp_end, hp);
- }
-
- done:
-
- if (c_p == rp)
- locks &= ~ERTS_PROC_LOCK_MAIN;
- if (locks && rp)
- erts_smp_proc_unlock(rp, locks);
-
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- erts_free(ERTS_ALC_T_TMP, res_elem_ix);
-
return res;
}
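
erts_process_info() walks item_ix backwards and conses each result onto the head, so the finished list comes out in request order. The same trick on a plain C singly linked list (Cell is hypothetical):

    #include <stdlib.h>

    typedef struct Cell { int val; struct Cell *next; } Cell;

    static Cell *build_in_request_order(const int *items, int len)
    {
        Cell *res = NULL;
        int i;
        for (i = len - 1; i >= 0; i--) {  /* iterate from the last item... */
            Cell *c = malloc(sizeof(Cell));
            if (!c)
                abort();
            c->val = items[i];
            c->next = res;                /* ...prepending preserves order */
            res = c;
        }
        return res;                       /* head now holds items[0] */
    }
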
-BIF_RETTYPE process_info_1(BIF_ALIST_1)
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
+
+static BIF_RETTYPE
+process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
{
+ ErtsHeapFactory hfact;
+ int def_arr[ERTS_PI_DEF_ARR_SZ];
+ int *item_ix = &def_arr[0];
+ Process *rp = NULL;
+ erts_aint32_t state;
+ BIF_RETTYPE ret;
+ Uint reds = 0;
+ ErtsProcLocks locks = 0;
+ int flags;
+ Uint reserve_size;
+ int len;
Eterm res;
- int fail_type;
- if (is_external_pid(BIF_ARG_1)
- && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
- case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
- }
+ ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
+
+ if (c_p->common.id == pid) {
+ int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
+ int sres, sreds, reds_left;
+
+ reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ sreds = reds_left;
+
+ if (!local_only) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
+
+ sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
+
+ BUMP_REDS(c_p, (int) sreds);
+ reds_left -= sreds;
+
+ if (state & ERTS_PSFLG_EXITING) {
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
+ goto exited;
+ }
+ if (!sres | (reds_left <= 0)) {
+ /*
+ * More signals to handle or out of reds; need
+ * to yield and continue. Prevent fetching of
+ * more signals by setting local-sigs-only flag.
+ */
+ c_p->flags |= F_LOCAL_SIGS_ONLY;
+ goto yield;
+ }
+
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
}
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
-}
+ if (is_atom(opt)) {
+ int ix = pi_arg2ix(opt);
+ item_ix[0] = ix;
+ len = 1;
+ locks = pi_ix2locks(ix);
+ reserve_size = 3 + pi_ix2rsz(ix);
+ flags = ERTS_PI_FLAG_SINGELTON;
+ flags |= pi_ix2flags(ix);
+ if (ix < 0)
+ goto badarg;
+ }
+ else {
+ Eterm list = opt;
+ Uint size = ERTS_PI_DEF_ARR_SZ;
+ len = 0;
+ reserve_size = 0;
+ locks = 0;
+ flags = 0;
-BIF_RETTYPE process_info_2(BIF_ALIST_2)
-{
- Eterm res;
- Process *rp;
- Eterm pid = BIF_ARG_1;
- ErtsProcLocks info_locks;
- int fail_type;
+ while (is_list(list)) {
+ Eterm *consp = list_val(list);
+ Eterm arg = CAR(consp);
+ int ix = pi_arg2ix(arg);
+ if (ix < 0)
+ goto badarg;
- if (is_external_pid(pid)
- && external_pid_dist_entry(pid) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
- if (is_not_internal_pid(pid)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (is_nil(BIF_ARG_2))
- BIF_RET(NIL);
-
- if (is_list(BIF_ARG_2)) {
- res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
- __FILE__, __LINE__);
- }
- }
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+ if (len >= size)
+ pi_setup_grow(&item_ix, def_arr, &size, len);
+
+ item_ix[len++] = ix;
+
+ locks |= pi_ix2locks(ix);
+ flags |= pi_ix2flags(ix);
+ reserve_size += pi_ix2rsz(ix);
+ reserve_size += 3; /* 2-tuple */
+ reserve_size += 2; /* cons */
+
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ goto badarg;
}
- if (pi_arg2ix(BIF_ARG_2) < 0)
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_internal_pid(pid)) {
+ if (is_external_pid(pid)
+ && external_pid_dist_entry(pid) == erts_this_dist_entry)
+ goto undefined;
+ goto badarg;
+ }
- info_locks = pi_locks(BIF_ARG_2);
+ if (always_wrap)
+ flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
- rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- res = am_undefined;
- else if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
- erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
+ if (c_p->common.id == pid) {
+ rp = c_p;
+ if (locks & ~ERTS_PROC_LOCK_MAIN)
+ erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ locks |= ERTS_PROC_LOCK_MAIN;
}
else {
- ErtsProcLocks unlock_locks = 0;
+ if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
+ goto send_signal;
+ state = ERTS_PSFLG_RUNNING; /* fail state... */
+ rp = erts_try_lock_sig_free_proc(pid, locks, &state);
+ if (!rp)
+ goto undefined;
+ if (rp == ERTS_PROC_LOCK_BUSY) {
+ rp = NULL;
+ goto send_signal;
+ }
+ if (state & ERTS_PSFLG_EXITING) {
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
+ }
+ if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
+ ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(rp);
+ if (c_p->sig_qs.cont) {
+ erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
+ locks = 0;
+ goto send_signal;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ }
+ }
- if (BIF_P == rp)
- info_locks |= ERTS_PROC_LOCK_MAIN;
+ erts_factory_proc_init(&hfact, c_p);
- if (!(info_locks & ERTS_PROC_LOCK_STATUS))
- unlock_locks |= ERTS_PROC_LOCK_STATUS;
+ res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
+ flags, reserve_size, &reds);
- if (info_locks & ERTS_PROC_LOCK_MSGQ) {
- /*
- * Move in queue into private queue and
- * release msgq lock, enabling others to
- * send messages to the process while it
- * is being inspected...
- */
- ASSERT(info_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- info_locks &= ~ERTS_PROC_LOCK_MSGQ;
- unlock_locks |= ERTS_PROC_LOCK_MSGQ;
- }
+ erts_factory_close(&hfact);
- if (unlock_locks)
- erts_smp_proc_unlock(rp, unlock_locks);
+ if (reds > INT_MAX/2)
+ reds = INT_MAX/2;
+ BUMP_REDS(c_p, (int) reds);
- res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0);
+ state = erts_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
+ if (state & ERTS_PSFLG_FREE) {
+ ASSERT(!locks);
+ goto undefined;
+ }
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
}
- ASSERT(is_value(res));
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- info_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp && info_locks)
- erts_smp_proc_unlock(rp, info_locks);
-#endif
+ ERTS_BIF_PREP_RET(ret, res);
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+done:
+
+ if (c_p == rp)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (locks && rp)
+ erts_proc_unlock(rp, locks);
+
+ if (item_ix != def_arr)
+ erts_free(ERTS_ALC_T_TMP, item_ix);
+
+ return ret;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ goto done;
+
+undefined:
+ ERTS_BIF_PREP_RET(ret, am_undefined);
+ goto done;
+
+exited:
+ ERTS_BIF_PREP_EXITED(ret, c_p);
+ goto done;
+
+yield:
+ if (pi2)
+ ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
+ else
+ ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
+ goto done;
+
+send_signal: {
+ Eterm ref = erts_make_ref(c_p);
+ int enqueued, need_msgq_len;
+ flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
+ need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ /*
+ * Set the receive mark so we won't have to scan the whole
+ * message queue for the result. Note that the caller
+ * unconditionally has to enter a receive matching only
+ * messages containing 'ref', or restore the save pointer.
+ */
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
+ len, need_msgq_len,
+ flags, reserve_size,
+ ref);
+ if (!enqueued) {
+ /* Restore save pointer... */
+ JOIN_MESSAGE(c_p);
+ goto undefined;
+ }
+ ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
+ goto done;
+ }
+}
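
The control flow of process_info_bif() reduces to: inspect yourself directly, otherwise try a signal-free lock of the target, otherwise fall back to an asynchronous request keyed by a fresh reference. A toy model of that decision under those assumptions; the real checks involve process state flags and lock negotiation:

    typedef enum { INSPECT_DIRECT, SEND_SIGNAL, UNDEFINED } PiPath;

    static PiPath pi_choose_path(int self_request, int force_sig,
                                 int exists, int lock_free_ok, int exiting)
    {
        if (self_request)
            return INSPECT_DIRECT;  /* own main lock is already held */
        if (force_sig)
            return SEND_SIGNAL;     /* item flagged ERTS_PI_FLAG_FORCE_SIG_SEND */
        if (!exists)
            return UNDEFINED;       /* no such process: result is 'undefined' */
        if (!lock_free_ok || exiting)
            return SEND_SIGNAL;     /* busy or terminating: await its reply */
        return INSPECT_DIRECT;
    }
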
+
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
+{
+ *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
+ if (*arr != def_arr)
+ *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
+ else {
+ int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
+ sys_memcpy((void *) new_arr, (void *) def_arr,
+ sizeof(int)*ERTS_PI_DEF_ARR_SZ);
+ *arr = new_arr;
+ }
+}
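
pi_setup_grow() starts from a small on-stack array and moves to the heap only when the request list outgrows it, so the first growth copies and later growths realloc. A self-contained rendition with an illustrative DEF_SZ:

    #include <stdlib.h>
    #include <string.h>

    #define DEF_SZ 8  /* illustrative; cf. ERTS_PI_DEF_ARR_SZ */

    static void grow(int **arr, int *def_arr, size_t *sz, int ix)
    {
        *sz = (size_t) (ix + 1) + DEF_SZ;
        if (*arr != def_arr) {
            int *p = realloc(*arr, (*sz) * sizeof(int)); /* already on heap */
            if (!p)
                abort();
            *arr = p;
        } else {
            int *p = malloc((*sz) * sizeof(int));        /* first growth */
            if (!p)
                abort();
            memcpy(p, def_arr, DEF_SZ * sizeof(int));    /* keep stack data */
            *arr = p;
        }
    }
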
+
+
+BIF_RETTYPE process_info_2(BIF_ALIST_2)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
+}
+
+BIF_RETTYPE process_info_1(BIF_ALIST_1)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
}
Eterm
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap)
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds)
{
Eterm *hp;
Eterm res = NIL;
+ Uint reserved;
+ Uint reserve_size = *reserve_sizep;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);
+
+ switch (item_ix) {
+ case ERTS_PI_IX_STATUS:
+ case ERTS_PI_IX_PRIORITY:
+ case ERTS_PI_IX_SUSPENDING:
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
+ break;
+ default:
+ ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
+ break;
+ }
+#endif
+
+ reserved = pi_ix2rsz(item_ix);
+ ERTS_PI_UNRESERVE(reserve_size, reserved);
+
+ (*reds)++;
ASSERT(rp);
/*
- * Q: Why this always_wrap argument?
+ * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
*
* A: registered_name is strange. If process has no registered name,
* process_info(Pid, registered_name) returns [], and
@@ -1078,138 +1348,170 @@ process_info_aux(Process *BIF_P,
* registered_name behaves as it should, i.e. a
* {registered_name, []} will appear in the resulting list.
*
- * If always_wrap != 0, process_info_aux() always wrap the result
- * in a key two tuple.
+ * If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
+ * wraps the result in a key-value 2-tuple.
*/
- switch (item) {
+ switch (item_ix) {
- case am_registered_name:
- if (rp->common.u.alive.reg) {
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_REGISTERED_NAME:
+ if (rp->common.u.alive.reg)
res = rp->common.u.alive.reg->name;
- } else {
- if (always_wrap) {
- hp = HAlloc(BIF_P, 3);
+ else {
+ if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
res = NIL;
- }
- else {
+ else
return NIL;
- }
}
break;
- case am_current_function:
- res = current_function(BIF_P, rp, &hp, 0);
+ case ERTS_PI_IX_CURRENT_FUNCTION:
+ res = current_function(c_p, hfact, rp, 0,
+ reserve_size, flags);
break;
- case am_current_location:
- res = current_function(BIF_P, rp, &hp, 1);
+ case ERTS_PI_IX_CURRENT_LOCATION:
+ res = current_function(c_p, hfact, rp, 1,
+ reserve_size, flags);
break;
- case am_current_stacktrace:
- res = current_stacktrace(BIF_P, rp, &hp);
+ case ERTS_PI_IX_CURRENT_STACKTRACE:
+ res = current_stacktrace(hfact, rp, reserve_size);
break;
- case am_initial_call:
- hp = HAlloc(BIF_P, 3+4);
+ case ERTS_PI_IX_INITIAL_CALL:
+ hp = erts_produce_heap(hfact, 4, reserve_size);
res = TUPLE3(hp,
- rp->u.initial[INITIAL_MOD],
- rp->u.initial[INITIAL_FUN],
- make_small(rp->u.initial[INITIAL_ARI]));
+ rp->u.initial.module,
+ rp->u.initial.function,
+ make_small(rp->u.initial.arity));
hp += 4;
break;
- case am_status:
- res = erts_process_status(rp, rpid);
- ASSERT(res != am_undefined);
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_STATUS: {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ res = erts_process_state2status(state);
+ if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
+ ASSERT(c_p == rp);
+ ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
+ if (!(state & (ERTS_PSFLG_SYS_TASKS
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q))) {
+ /*
+ * We are servicing a process-info request from
+ * another process. If that other process could
+ * have inspected our state itself, we would have
+ * been in the 'waiting' state.
+ */
+ res = am_waiting;
+ }
+ }
break;
+ }
- case am_messages: {
-
- if (rp->msg.len == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
- hp = HAlloc(BIF_P, 3);
- } else {
+ case ERTS_PI_IX_MESSAGES: {
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
+ res = NIL;
+ else {
+ int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
ErtsMessageInfo *mip;
- Sint i;
+ Sint i, len;
Uint heap_need;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
mip = erts_alloc(ERTS_ALC_T_TMP,
- rp->msg.len*sizeof(ErtsMessageInfo));
+ rp->sig_qs.len*sizeof(ErtsMessageInfo));
/*
* Note that message queue may shrink when calling
- * erts_prep_msgq_for_inspection() since it removes
+ * erts_proc_sig_prep_msgq_for_inspection() since it removes
* corrupt distribution messages.
*/
- heap_need = erts_prep_msgq_for_inspection(BIF_P, rp, rp_locks, mip);
- heap_need += 3; /* top 2-tuple */
- heap_need += rp->msg.len*2; /* Cons cells */
+ heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
+ rp_locks,
+ info_on_self,
+ mip);
+ len = rp->sig_qs.len;
- hp = HAlloc(BIF_P, heap_need); /* heap_need is exact */
-#ifdef DEBUG
- hp_end = hp + heap_need;
-#endif
+ heap_need += len*2; /* Cons cells */
+
+ reserve_size += heap_need;
/* Build list of messages... */
- for (i = rp->msg.len - 1, res = NIL; i >= 0; i--) {
+ for (i = len - 1, res = NIL; i >= 0; i--) {
Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
Uint sz = mip[i].size;
+ ERTS_PI_UNRESERVE(reserve_size, sz+2);
+ hp = erts_produce_heap(hfact, sz+2, reserve_size);
+
if (sz != 0)
- msg = copy_struct(msg, sz, &hp, &BIF_P->off_heap);
+ msg = copy_struct(msg, sz, &hp, hfact->off_heap);
res = CONS(hp, msg, res);
hp += 2;
}
- ASSERT(hp_end == hp + 3);
+ *reds += (Uint) len / 4;
erts_free(ERTS_ALC_T_TMP, mip);
}
break;
}
- case am_message_queue_len:
- hp = HAlloc(BIF_P, 3);
- res = make_small(rp->msg.len);
+ case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
+ Sint len = rp->sig_qs.len;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ ASSERT(len >= 0);
+ if (len <= MAX_SMALL)
+ res = make_small(len);
+ else {
+ hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
+ res = uint_to_big((Uint) len, hp);
+ }
break;
+ }
- case am_links: {
+ case ERTS_PI_IX_LINKS: {
MonitorInfoCollection mic;
int i;
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(ERTS_P_LINKS(rp),&collect_one_link,&mic);
+ erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm item_src = mic.mi[i].entity.term;
+ Uint sz = NC_HEAP_SIZE(item_src) + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ item = STORE_NC(&hp, hfact->off_heap, item_src);
res = CONS(hp, item, res);
- hp += 2;
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitors: {
+ case ERTS_PI_IX_MONITORS: {
MonitorInfoCollection mic;
int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),
- &collect_one_origin_monitor, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_origin_monitor,
+ (void *) &mic);
+
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- if (is_atom(mic.mi[i].entity)) {
+ if (mic.mi[i].named) {
/* Monitor by name.
* Build {process|port, {Name, Node}} and cons it.
*/
@@ -1221,175 +1523,228 @@ process_info_aux(Process *BIF_P,
|| is_port(mic.mi[i].pid)
|| is_atom(mic.mi[i].pid));
- t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
+ hp = erts_produce_heap(hfact, 3+3+2, reserve_size);
+
+ t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
t2 = TUPLE2(hp, m_type, t1);
hp += 3;
res = CONS(hp, t2, res);
- hp += 2;
}
else {
- /* Monitor by pid. Build {process|port, Pid} and cons it. */
+ /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm pid;
+ Eterm m_type;
+ Eterm pid_src = mic.mi[i].entity.term;
+ Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
+ sz += 3 + 2;
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ pid = (is_atom(pid_src)
+ ? pid_src
+ : STORE_NC(&hp, hfact->off_heap, pid_src));
+
+ switch (mic.mi[i].type) {
+ case ERTS_MON_TYPE_PORT:
+ m_type = am_port;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ m_type = am_time_offset;
+ break;
+ default:
+ m_type = am_process;
+ break;
+ }
- Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
ASSERT(is_pid(mic.mi[i].pid)
|| is_port(mic.mi[i].pid));
t = TUPLE2(hp, m_type, pid);
hp += 3;
res = CONS(hp, t, res);
- hp += 2;
}
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitored_by: {
+ case ERTS_PI_IX_MONITORED_BY: {
MonitorInfoCollection mic;
int i;
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_target_monitor,&mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
+ collect_one_target_monitor,
+ (void *) &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_target_monitor,
+ (void *) &mic);
+
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Uint sz = 2;
+
+ if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
+ sz += erts_resource_ref_size(mic.mi[i].entity.resource);
+ else
+ sz += NC_HEAP_SIZE(mic.mi[i].entity.term);
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
+ item = erts_bld_resource_ref(&hp,
+ hfact->off_heap,
+ mic.mi[i].entity.resource);
+ else
+ item = STORE_NC(&hp,
+ hfact->off_heap,
+ mic.mi[i].entity.term);
res = CONS(hp, item, res);
- hp += 2;
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_suspending: {
+ case ERTS_PI_IX_SUSPENDING: {
ErtsSuspendMonitorInfoCollection smic;
int i;
- Eterm item;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
- ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
- BIF_P,
- (BIF_P == rp
- ? ERTS_PROC_LOCK_MAIN
- : 0) | ERTS_PROC_LOCK_LINK);
+ ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);
+
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_suspend_monitor,
+ (void *) &smic);
+
+ reserve_size += smic.sz;
- erts_doforall_suspend_monitors(rp->suspend_monitors,
- &collect_one_suspend_monitor,
- &smic);
- hp = HAlloc(BIF_P, 3 + smic.sz);
-#ifdef DEBUG
- hp_end = hp + smic.sz;
-#endif
-
res = NIL;
for (i = 0; i < smic.smi_i; i++) {
- Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
- Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
- Eterm active;
- Eterm pending;
- if (IS_SSMALL(a))
- active = make_small(a);
- else {
- active = small_to_big(a, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- if (IS_SSMALL(p))
- pending = make_small(p);
- else {
- pending = small_to_big(p, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+ Sint ci;
+ Eterm ct, active, pending, item;
+ Uint sz = 4 + 2;
+
+ msp = smic.smi[i];
+ mstate = erts_atomic_read_nob(&msp->state);
+
+ ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(ci))
+ sz += BIG_UINT_HEAP_SIZE;
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (IS_SSMALL(ci))
+ ct = make_small(ci);
+ else {
+ ct = small_to_big(ci, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
+ active = ct;
+ pending = make_small(0);
+ }
+ else {
+ active = make_small(0);
+ pending = ct;
+ }
+
+ ASSERT(is_internal_pid(msp->md.origin.other.item));
+
+ item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
hp += 4;
res = CONS(hp, item, res);
- hp += 2;
}
+ *reds += (Uint) smic.smi_i / 4;
+
ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
- ASSERT(hp == hp_end);
break;
}
- case am_dictionary:
- if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
+ case ERTS_PI_IX_DICTIONARY:
+ if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
res = NIL;
} else {
- res = erts_dictionary_copy(BIF_P, rp->dictionary);
+ Uint num = rp->dictionary->numElements;
+ res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
+ *reds += (Uint) num / 4;
}
- hp = HAlloc(BIF_P, 3);
+
break;
- case am_trap_exit: {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- hp = HAlloc(BIF_P, 3);
- if (state & ERTS_PSFLG_TRAP_EXIT)
- res = am_true;
- else
- res = am_false;
+ case ERTS_PI_IX_TRAP_EXIT:
+ res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
break;
- }
- case am_error_handler:
- hp = HAlloc(BIF_P, 3);
- res = erts_proc_get_error_handler(BIF_P);
+ case ERTS_PI_IX_ERROR_HANDLER:
+ res = erts_proc_get_error_handler(rp);
break;
- case am_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
break;
}
- case am_fullsweep_after: {
- Uint hsz = 3;
+ case ERTS_PI_IX_FULLSWEEP_AFTER: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
break;
}
- case am_min_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
break;
}
- case am_min_bin_vheap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
break;
}
- case am_max_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MAX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
NULL, &hsz);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
&hp, NULL);
break;
}
- case am_total_heap_size: {
- ErtsMessage *mp;
+ case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
Uint total_heap_size;
- Uint hsz = 3;
+ Uint hsz = 0;
total_heap_size = rp->heap_sz;
if (rp->old_hend && rp->old_heap)
@@ -1397,44 +1752,53 @@ process_info_aux(Process *BIF_P,
total_heap_size += rp->mbuf_sz;
- if (rp->flags & F_ON_HEAP_MSGQ)
- for (mp = rp->msg.first; mp; mp = mp->next)
+ if (rp->flags & F_ON_HEAP_MSGQ) {
+ ErtsMessage *mp;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ for (mp = rp->sig_qs.first; mp; mp = mp->next) {
+ ASSERT(ERTS_SIG_IS_MSG(mp));
if (mp->data.attached)
total_heap_size += erts_msg_attached_data_size(mp);
+ }
+ *reds += (Uint) rp->sig_qs.len / 4;
+ }
(void) erts_bld_uint(NULL, &hsz, total_heap_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, total_heap_size);
break;
}
- case am_stack_size: {
+ case ERTS_PI_IX_STACK_SIZE: {
Uint stack_size = STACK_START(rp) - rp->stop;
- Uint hsz = 3;
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, stack_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, stack_size);
break;
}
- case am_memory: { /* Memory consumed in bytes */
- Uint hsz = 3;
+ case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
+ Uint hsz = 0;
Uint size = erts_process_memory(rp, 0);
(void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, size);
+
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ *reds += (Uint) rp->sig_qs.len / 4;
+
break;
}
- case am_garbage_collection: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION: {
DECL_AM(minor_gcs);
Eterm t;
Uint map_sz = 0;
erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3);
- /* last "3" is for outside tuple */
+ hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);
t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
res = CONS(hp, t, NIL); hp += 2;
@@ -1453,89 +1817,76 @@ process_info_aux(Process *BIF_P,
break;
}
- case am_garbage_collection_info: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
Uint sz = 0, actual_sz = 0;
- if (rp == BIF_P) {
- sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
- } else {
- erts_process_gc_info(rp, &sz, NULL, 0, 0);
- sz += 3;
- }
+ erts_process_gc_info(rp, &sz, NULL, 0, 0);
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);
- /* We may have some extra space, fill with 0 tuples */
- if (actual_sz <= sz - 3) {
- for (; actual_sz < sz - 3; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- } else {
- for (; actual_sz < sz; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- hp = HAlloc(BIF_P, 3);
- }
-
break;
}
- case am_group_leader: {
+ case ERTS_PI_IX_GROUP_LEADER: {
int sz = NC_HEAP_SIZE(rp->group_leader);
- hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
break;
}
- case am_reductions: {
- Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
- Uint hsz = 3;
+ case ERTS_PI_IX_REDUCTIONS: {
+ Uint reds = rp->reds + erts_current_reductions(c_p, rp);
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, reds);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, reds);
break;
}
- case am_priority:
- hp = HAlloc(BIF_P, 3);
- res = erts_get_process_priority(rp);
+ case ERTS_PI_IX_PRIORITY: {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ if (ERTS_PSFLG_EXITING & state)
+ return am_undefined;
+ res = erts_get_process_priority(state);
break;
+ }
- case am_trace:
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_TRACE:
res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
- case am_binary: {
- Uint sz = 3;
+ case ERTS_PI_IX_BINARY: {
+ Uint sz = 0;
(void) bld_bin_list(NULL, &sz, &MSO(rp));
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
- case am_sequential_trace_token:
- res = copy_object(rp->seq_trace_token, BIF_P);
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
+ Uint sz = size_object(rp->seq_trace_token);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
break;
+ }
- case am_catchlevel:
- hp = HAlloc(BIF_P, 3);
- res = make_small(catchlevel(BIF_P));
+ case ERTS_PI_IX_CATCHLEVEL:
+ res = make_small(catchlevel(rp));
break;
- case am_backtrace: {
+ case ERTS_PI_IX_BACKTRACE: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
- res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
+ res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
+ dsbufp->str_len, reserve_size);
erts_destroy_tmp_dsbuf(dsbufp);
- hp = HAlloc(BIF_P, 3);
break;
}
- case am_last_calls: {
+ case ERTS_PI_IX_LAST_CALLS: {
struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
if (!scb) {
- hp = HAlloc(BIF_P, 3);
res = am_false;
} else {
/*
@@ -1543,44 +1894,55 @@ process_info_aux(Process *BIF_P,
* Might be less than that, if there are sends, receives or timeouts,
* so we must do a HRelease() to avoid creating holes.
*/
- Uint needed = scb->n*(2+4) + 3;
- Eterm* limit;
+ Sint needed = scb->n*(2+4);
Eterm term, list;
int i, j;
+ Export *exp;
+
+ reserve_size += needed;
- hp = HAlloc(BIF_P, needed);
- limit = hp + needed;
list = NIL;
for (i = 0; i < scb->n; i++) {
+ Uint sz;
j = scb->cur - i - 1;
if (j < 0)
j += scb->len;
- if (scb->ct[j] == &exp_send)
+
+ sz = 2;
+ exp = scb->ct[j];
+ if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
+ sz += 4;
+
+ needed -= sz;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (exp == &exp_send)
term = am_send;
- else if (scb->ct[j] == &exp_receive)
+ else if (exp == &exp_receive)
term = am_receive;
- else if (scb->ct[j] == &exp_timeout)
+ else if (exp == &exp_timeout)
term = am_timeout;
else {
term = TUPLE3(hp,
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- make_small(scb->ct[j]->code[2]));
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ make_small(scb->ct[j]->info.mfa.arity));
hp += 4;
}
list = CONS(hp, term, list);
- hp += 2;
}
+
+ ASSERT(needed >= 0);
+ if (needed > 0)
+ reserve_size -= needed;
+
res = list;
- res = TUPLE2(hp, item, res);
- hp += 3;
- HRelease(BIF_P,limit,hp);
- return res;
}
break;
}
- case am_message_queue_data:
+ case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
case F_OFF_HEAP_MSGQ:
res = am_off_heap;
@@ -1593,20 +1955,34 @@ process_info_aux(Process *BIF_P,
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
break;
}
- hp = HAlloc(BIF_P, 3);
break;
+ case ERTS_PI_IX_MAGIC_REF: {
+ Uint sz = 0;
+ (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
+ hp = erts_produce_heap(hfact, sz, 0);
+ res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
+
+ *reds += (Uint) 10;
+ break;
+ }
+
default:
return THE_NON_VALUE; /* will produce badarg */
}
- return TUPLE2(hp, item, res);
+ ERTS_PI_UNRESERVE(reserve_size, 3);
+ *reserve_sizep = reserve_size;
+ hp = erts_produce_heap(hfact, 3, reserve_size);
+
+ return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
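
The reserve_size threading through process_info_aux() is a heap budget: a worst-case amount is reserved up front and each erts_produce_heap() call is preceded by ERTS_PI_UNRESERVE for exactly the chunk about to be used, so the assertion catches any under-estimate. A stripped-down model of that bookkeeping (produce() is a stand-in; erts_produce_heap() is not reproduced):

    #include <assert.h>
    #include <stddef.h>

    /* claim `sz` words from the remaining reservation, as the
     * ERTS_PI_UNRESERVE macro above does */
    #define UNRESERVE(rs, sz) (assert((rs) >= (sz)), (rs) -= (sz))

    static void produce(size_t need, size_t *reserve)
    {
        UNRESERVE(*reserve, need);
        /* ...the real code allocates `need` heap words here, passing
         * the *remaining* reservation along as a sizing hint... */
    }

    int main(void)
    {
        size_t reserve = 3 + 2;  /* e.g. one 2-tuple plus one cons cell */
        produce(3, &reserve);
        produce(2, &reserve);
        assert(reserve == 0);    /* the budget is spent exactly */
        return 0;
    }
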
#undef MI_INC
static Eterm
-current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
+current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags)
{
Eterm* hp;
Eterm res;
@@ -1614,16 +1990,16 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
if (rp->current == NULL) {
erts_lookup_function_info(&fi, rp->i, full_info);
- rp->current = fi.current;
+ rp->current = fi.mfa;
} else if (full_info) {
erts_lookup_function_info(&fi, rp->i, full_info);
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
/* Use the current function without location info */
erts_set_current_function(&fi, rp->current);
}
}
- if (BIF_P == rp) {
+ if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
FunctionInfo fi2;
/*
@@ -1633,9 +2009,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* instead if it can be looked up.
*/
erts_lookup_function_info(&fi2, rp->cp, full_info);
- if (fi2.current) {
+ if (fi2.mfa) {
fi = fi2;
- rp->current = fi2.current;
+ rp->current = fi2.mfa;
}
}
@@ -1643,23 +2019,22 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* Return the result.
*/
if (rp->current == NULL) {
- hp = HAlloc(BIF_P, 3);
res = am_undefined;
} else if (full_info) {
- hp = HAlloc(BIF_P, 3+fi.needed);
- hp = erts_build_mfa_item(&fi, hp, am_true, &res);
+ hp = erts_produce_heap(hfact, fi.needed, reserve_size);
+ erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
- hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
- hp += 4;
+ hp = erts_produce_heap(hfact, 4, reserve_size);
+ res = TUPLE3(hp, rp->current->module,
+ rp->current->function,
+ make_small(rp->current->arity));
}
- *hpp = hp;
return res;
}
static Eterm
-current_stacktrace(Process* p, Process* rp, Eterm** hpp)
+current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size)
{
Uint sz;
struct StackTrace* s;
@@ -1668,15 +2043,15 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
FunctionInfo* stkp;
Uint heap_size;
int i;
- Eterm* hp = *hpp;
+ Eterm* hp;
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
@@ -1692,37 +2067,32 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
heap_size = 3;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
}
- hp = HAlloc(p, heap_size);
+ reserve_size += heap_size;
+
+ /*
+ * We intentionally produce heap in small chunks
+ * (for more info see process_info_aux()).
+ */
while (stkp > stk) {
stkp--;
+ sz = stkp->needed + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
res = CONS(hp, mfa, res);
- hp += 2;
}
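+    /*
+     * A sketch of the unreserve/produce pattern used in the loop above
+     * (assuming erts_produce_heap(hfact, sz, reserve) hands out sz words
+     * immediately while keeping reserve words available for later chunks):
+     *
+     *     ERTS_PI_UNRESERVE(reserve_size, sz);             // sz produced now
+     *     hp = erts_produce_heap(hfact, sz, reserve_size); // rest reserved
+     */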
erts_free(ERTS_ALC_T_TMP, stk);
erts_free(ERTS_ALC_T_TMP, s);
- *hpp = hp;
return res;
}
-#if defined(VALGRIND)
-static int check_if_xml(void)
-{
- char buf[1];
- size_t bufsz = sizeof(buf);
- return erts_sys_getenv_raw("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
-}
-#else
-#define check_if_xml() 0
-#endif
-
/*
* This function takes care of calls to erlang:system_info/1 when the argument
* is a tuple.
@@ -1766,45 +2136,6 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
return make_small(sizeof(UWord));
}
goto badarg;
- } else if (sel == am_allocated) {
- if (arity == 2) {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(*tp);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(*tp, buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- else if (arity == 3 && tp[0] == am_status) {
- if (is_atom(tp[1]))
- return erts_instr_get_stat(BIF_P, tp[1], 1);
- else {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(tp[1]);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(tp[1], buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- }
- else
- goto badarg;
} else if (sel == am_allocator) {
switch (arity) {
case 2:
@@ -1858,15 +2189,9 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
#endif
} else if (is_list(*tp)) {
#if defined(PURIFY)
-#define ERTS_ERROR_CHECKER_PRINTF purify_printf
-#define ERTS_ERROR_CHECKER_PRINTF_XML purify_printf
+# define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
-#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
-# ifndef HAVE_VALGRIND_PRINTF_XML
-# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF
-# else
-# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
-# endif
+# define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
@@ -1882,12 +2207,7 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
ASSERT(r == buf_size - 1);
}
buf[buf_size - 1 - r] = '\0';
- if (check_if_xml()) {
- ERTS_ERROR_CHECKER_PRINTF_XML("<erlang_info_log>"
- "%s</erlang_info_log>\n", buf);
- } else {
- ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
- }
+ ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
@@ -2085,14 +2405,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
} else if (BIF_ARG_1 == am_multi_scheduling) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
-#ifndef ERTS_DIRTY_SCHEDULERS
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else
-#endif
{
int msb = erts_is_multi_scheduling_blocked();
BIF_RET(!msb
@@ -2101,7 +2413,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
? am_blocked
: am_blocked_normal));
}
-#endif
} else if (BIF_ARG_1 == am_build_type) {
#if defined(DEBUG)
ERTS_DECL_AM(debug);
@@ -2181,8 +2492,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (BIF_ARG_1 == am_allocated_areas) {
res = erts_allocated_areas(NULL, NULL, BIF_P);
BIF_RET(res);
- } else if (BIF_ARG_1 == am_allocated) {
- BIF_RET(erts_instr_get_memory_map(BIF_P));
} else if (BIF_ARG_1 == am_hipe_architecture) {
#if defined(HIPE)
BIF_RET(hipe_arch_name);
@@ -2211,7 +2520,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
Eterm tup;
hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
@@ -2229,7 +2538,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -2262,8 +2571,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
/* Need to be the only thread running... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
if (BIF_ARG_1 == am_info)
info(ERTS_PRINT_DSBUF, (void *) dsbufp);
@@ -2274,8 +2583,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else
distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
ASSERT(dsbufp && dsbufp->str);
res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
@@ -2284,7 +2593,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
DistEntry *dep;
i = 0;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2307,7 +2616,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
@@ -2323,9 +2632,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
sizeof(ERLANG_ARCHITECTURE)-1,
NIL));
}
- else if (BIF_ARG_1 == am_memory_types) {
- return erts_instr_get_type_info(BIF_P);
- }
else if (BIF_ARG_1 == am_os_type) {
BIF_RET(os_type_tuple);
}
@@ -2333,16 +2639,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_allocator_options((void *) BIF_P));
}
else if (BIF_ARG_1 == am_thread_pool_size) {
-#ifdef USE_THREADS
extern int erts_async_max_threads;
-#endif
int n;
-#ifdef USE_THREADS
n = erts_async_max_threads;
-#else
- n = 0;
-#endif
BIF_RET(make_small(n));
}
else if (BIF_ARG_1 == am_alloc_util_allocators) {
@@ -2356,12 +2656,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(os_version_tuple);
}
else if (BIF_ARG_1 == am_version) {
- int n = strlen(ERLANG_VERSION);
+ int n = sys_strlen(ERLANG_VERSION);
hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
}
else if (BIF_ARG_1 == am_machine) {
- int n = strlen(EMULATOR);
+ int n = sys_strlen(EMULATOR);
hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
}
@@ -2387,11 +2687,11 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = erts_bld_cons(hpp, hszp,
erts_bld_tuple(hpp, hszp, 2,
erts_atom_put((byte *)opc[i].name,
- strlen(opc[i].name),
+ sys_strlen(opc[i].name),
ERTS_ATOM_ENC_LATIN1,
1),
erts_bld_uint(hpp, hszp,
- opc[i].count)),
+ erts_instr_count[i])),
res);
}
@@ -2410,7 +2710,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#endif
BIF_RET(res);
-#endif /* #ifndef ERTS_SMP */
+#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
} else if (BIF_ARG_1 == am_wordsize) {
return make_small(sizeof(Eterm));
} else if (BIF_ARG_1 == am_endian) {
@@ -2490,11 +2790,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
#endif
} else if (BIF_ARG_1 == am_threads) {
-#ifdef USE_THREADS
return am_true;
-#else
- return am_false;
-#endif
} else if (BIF_ARG_1 == am_creation) {
return make_small(erts_this_node->creation);
} else if (BIF_ARG_1 == am_break_ignored) {
@@ -2553,11 +2849,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
-#ifdef ERTS_SMP
BIF_RET(am_true);
-#else
- BIF_RET(am_false);
-#endif
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
BIF_RET(erts_bound_schedulers_term(BIF_P));
} else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
@@ -2569,11 +2861,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = make_small(erts_no_schedulers);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2584,13 +2871,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2601,19 +2882,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 2+5);
- res = CONS(hp+5,
- TUPLE4(hp,
- am_normal,
- make_small(1),
- make_small(1),
- make_small(1)),
- NIL);
- BIF_RET(res);
-#else
Eterm *hp, tpl;
Uint sz, total, online, active,
dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
@@ -2659,24 +2928,14 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp += 5;
res = CONS(hp, tpl, res);
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint online;
erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(online));
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint active;
erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(active));
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
@@ -2689,7 +2948,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint dirty_io;
erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
BIF_RET(make_small(dirty_io));
-#endif
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2710,7 +2968,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(CONTEXT_REDS));
} else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_KERNEL_POLL
- BIF_RET(erts_use_kernel_poll ? am_true : am_false);
+ BIF_RET(am_true);
#else
BIF_RET(am_false);
#endif
@@ -2735,23 +2993,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
BIF_RET(erts_check_io_info(BIF_P));
} else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
-#endif
} else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
-#endif
} else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
BIF_RET(ERTS_USE_MODIFIED_TIMING()
? make_small(erts_modified_timing_level)
@@ -2814,12 +3064,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
-#ifdef ERTS_SMP
else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
erts_thr_progress_dbg_print_state();
BIF_RET(am_true);
}
-#endif
else if (BIF_ARG_1 == am_message_queue_data) {
switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
case SPO_OFF_HEAP_MSGQ:
@@ -2835,21 +3083,21 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint sz;
Eterm res = NIL, tup, text;
Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
- 2*(strlen(erts_build_flags_CONFIG_H) +
- strlen(erts_build_flags_CFLAGS) +
- strlen(erts_build_flags_LDFLAGS)));
+ 2*(sys_strlen(erts_build_flags_CONFIG_H) +
+ sys_strlen(erts_build_flags_CFLAGS) +
+ sys_strlen(erts_build_flags_LDFLAGS)));
- sz = strlen(erts_build_flags_CONFIG_H);
+ sz = sys_strlen(erts_build_flags_CONFIG_H);
text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
tup = TUPLE2(hp, am_config_h, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
- sz = strlen(erts_build_flags_CFLAGS);
+ sz = sys_strlen(erts_build_flags_CFLAGS);
text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
tup = TUPLE2(hp, am_cflags, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
- sz = strlen(erts_build_flags_LDFLAGS);
+ sz = sys_strlen(erts_build_flags_LDFLAGS);
text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
tup = TUPLE2(hp, am_ldflags, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
@@ -2859,6 +3107,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_ets_table_count()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_get_atom_limit()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
+ BIF_RET(make_small(atom_table_size()));
+ }
else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
if (erts_has_time_correction()
&& erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
@@ -2867,7 +3124,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_disabled);
}
else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
- BIF_RET(erts_eager_check_io ? am_true : am_false);
+ BIF_RET(am_true);
}
else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
@@ -2883,31 +3140,20 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(AM_tag);
#endif
}
- else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) {
- Eterm terms[3];
- Sint length = 1;
- Uint sz = 0;
- Eterm *hp, res;
- DECL_AM(direct_references);
-
- terms[0] = AM_direct_references;
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- {
- DECL_AM(indirect_references);
- terms[1] = AM_indirect_references;
- terms[2] = am_copy_literals;
- length = 3;
- }
-#endif
- erts_bld_list(NULL, &sz, length, terms);
- hp = HAlloc(BIF_P, sz);
- res = erts_bld_list(&hp, NULL, length, terms);
- BIF_RET(res);
- }
BIF_ERROR(BIF_P, BADARG);
}
+static void monitor_size(ErtsMonitor *mon, void *vsz)
+{
+    *((Uint *) vsz) += erts_monitor_size(mon);
+}
+
+static void link_size(ErtsLink *lnk, void *vsz)
+{
+    *((Uint *) vsz) += erts_link_size(lnk);
+}
+
/**********************************************************************/
/* Return information on ports */
/* Info:
@@ -2926,7 +3172,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
{
Eterm res = THE_NON_VALUE;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (item == am_id) {
if (hpp)
@@ -2943,7 +3189,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(ERTS_P_LINKS(prt), &collect_one_link, &mic);
+ erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);
if (szp)
*szp += mic.sz;
@@ -2951,7 +3197,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -2967,11 +3213,11 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
else if (item == am_monitors) {
MonitorInfoCollection mic;
int i;
- Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(prt),
- &collect_one_origin_monitor, &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ collect_one_origin_monitor,
+ (void *) &mic);
if (szp)
*szp += mic.sz;
@@ -2980,11 +3226,10 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- Eterm m_type;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
- m_type = is_port(item) ? am_port : am_process;
- t = TUPLE2(*hpp, m_type, item);
+ ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
+ ASSERT(is_internal_pid(mic.mi[i].entity.term));
+ t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
*hpp += 3;
res = CONS(*hpp, t, res);
*hpp += 2;
@@ -3003,15 +3248,20 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(prt),
- &collect_one_target_monitor, &mic);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ collect_one_target_monitor,
+ (void *) &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ collect_one_target_monitor,
+ (void *) &mic);
if (szp)
*szp += mic.sz;
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -3087,7 +3337,12 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
*/
Uint size = 0;
- erts_doforall_links(ERTS_P_LINKS(prt), &erts_one_link_size, &size);
+ erts_link_tree_foreach(ERTS_P_LINKS(prt),
+ link_size, (void *) &size);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ monitor_size, (void *) &size);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ monitor_size, (void *) &size);
size += erts_port_data_size(prt);
@@ -3116,9 +3371,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
if (hpp) {
-#ifndef ERTS_SMP
- res = am_false;
-#else
if (erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
DECL_AM(port_level);
@@ -3132,7 +3384,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
& ERL_DRV_FLAG_USE_PORT_LOCKING));
res = AM_driver_level;
}
-#endif
}
if (szp) {
res = am_true;
@@ -3145,7 +3396,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
goto done;
}
res = ((ERTS_PTS_FLG_PARALLELISM &
- erts_smp_atomic32_read_nob(&prt->sched.flags))
+ erts_atomic32_read_nob(&prt->sched.flags))
? am_true
: am_false);
}
@@ -3221,7 +3472,7 @@ fun_info_2(BIF_ALIST_2)
}
break;
case am_refc:
- val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p);
+ val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
hp = HAlloc(p, 3);
break;
case am_arity:
@@ -3248,7 +3499,7 @@ fun_info_2(BIF_ALIST_2)
break;
case am_module:
hp = HAlloc(p, 3);
- val = exp->code[0];
+ val = exp->info.mfa.module;
break;
case am_new_index:
hp = HAlloc(p, 3);
@@ -3276,11 +3527,11 @@ fun_info_2(BIF_ALIST_2)
break;
case am_arity:
hp = HAlloc(p, 3);
- val = make_small(exp->code[2]);
+ val = make_small(exp->info.mfa.arity);
break;
case am_name:
hp = HAlloc(p, 3);
- val = exp->code[1];
+ val = exp->info.mfa.function;
break;
default:
goto error;
@@ -3306,74 +3557,102 @@ fun_info_mfa_1(BIF_ALIST_1)
} else if (is_export(fun)) {
Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
hp = HAlloc(p, 4);
- BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ BIF_RET(TUPLE3(hp,exp->info.mfa.module,
+ exp->info.mfa.function,
+ make_small(exp->info.mfa.arity)));
}
BIF_ERROR(p, BADARG);
}
+BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
+{
+ if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+ erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ BIF_RET(am_ok);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
- if(is_internal_pid(BIF_ARG_1)) {
- Process *rp;
-
- if (BIF_ARG_1 == BIF_P->common.id)
- BIF_RET(am_true);
-
- rp = erts_proc_lookup_raw(BIF_ARG_1);
- if (!rp) {
- BIF_RET(am_false);
- }
- else {
- if (erts_smp_atomic32_read_acqb(&rp->state)
- & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING))
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
- else
- BIF_RET(am_true);
- }
- }
- else if(is_external_pid(BIF_ARG_1)) {
- if(external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ if (is_internal_pid(BIF_ARG_1)) {
+ erts_aint32_t state;
+ Process *rp;
+
+ if (BIF_ARG_1 == BIF_P->common.id)
+ BIF_RET(am_true);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
+ if (!rp)
+ BIF_RET(am_false);
+
+ state = erts_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q)) {
+            /*
+             * If the process is in an exiting state, trap out, send an
+             * 'is alive' request, and wait for it to complete
+             * termination.
+             *
+             * If the process has signals enqueued, we need to send it
+             * an 'is alive' request via its signal queue in order to
+             * ensure that signal order is preserved (we may earlier
+             * have sent it an exit signal that has not been processed
+             * yet).
+             */
+ BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
+ }
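+        /*
+         * An example of the ordering this preserves (Erlang level;
+         * a sketch of the guarantee described above):
+         *
+         *     exit(P, kill),
+         *     false = is_process_alive(P)
+         *
+         * The 'is alive' request is enqueued behind the exit signal
+         * in P's signal queue, so the reply cannot claim that P is
+         * still alive after the exit signal has been sent.
+         */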
+
+ BIF_RET(am_true);
+ }
+
+ if (is_external_pid(BIF_ARG_1)) {
+ if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
BIF_RET(am_false); /* A pid from an old incarnation of this node */
- else
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- BIF_ERROR(BIF_P, BADARG);
}
+
+ BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_display_2(BIF_ALIST_2)
+static Eterm
+process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
+ if (redsp)
+ *redsp = 1;
- if (BIF_ARG_2 != am_backtrace)
- BIF_ERROR(BIF_P, BADARG);
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_badarg;
- rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
- if(!rp) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
- Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
- ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
- BIF_ARG_1,
- am_erlang,
- am_process_display,
- args,
- 2);
- }
- erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(rp, (BIF_P == rp
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#endif
- BIF_RET(am_true);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ return am_true;
+}
+
+
+BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
+{
+ Eterm res;
+
+ if (BIF_ARG_2 != am_backtrace)
+ BIF_RET(am_badarg);
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_display(BIF_P, NULL, NULL, NULL);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ process_display,
+ NULL);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
+
+ BIF_RET(res);
}
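+/*
+ * A note on the pattern above: erts_proc_sig_send_rpc_request() enqueues
+ * process_display() as a signal on BIF_ARG_1, so the stack dump runs in
+ * the target process' own context. The '!0' argument is assumed here to
+ * be the reply flag, making the request synchronous from the caller's
+ * point of view.
+ */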
/* this is a general call which return some possibly useful information */
@@ -3384,13 +3663,24 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm* hp;
if (BIF_ARG_1 == am_scheduler_wall_time) {
- res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
- } else if (BIF_ARG_1 == am_total_active_tasks
- || BIF_ARG_1 == am_total_run_queue_lengths) {
- Uint no = erts_run_queues_len(NULL, 0, BIF_ARG_1 == am_total_active_tasks);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_run_queue_lengths)
+ | (BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
+ Uint no = erts_run_queues_len(NULL, 0,
+ ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_active_tasks_all)),
+ ((BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
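+        /*
+         * Note that bitwise '|' rather than '||' is used on the comparison
+         * results above; each operand is 0 or 1, so the value is the same,
+         * and the compiler need not emit short-circuit branches (an
+         * assumption about intent, matching the same idiom used below).
+         */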
if (IS_USMALL(0, no))
res = make_small(no);
else {
@@ -3398,13 +3688,21 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = uint_to_big(no, hp);
}
BIF_RET(res);
- } else if (BIF_ARG_1 == am_active_tasks
- || BIF_ARG_1 == am_run_queue_lengths) {
+ } else if ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_run_queue_lengths)
+ | (BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all));
+ int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
+ (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, BIF_ARG_1 == am_active_tasks);
+ (void) erts_run_queues_len(qszs, 0,
+ ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_active_tasks_all)),
+ incl_dirty_io);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3468,24 +3766,32 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
- res = erts_run_queues_len(NULL, 1, 0);
+ res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
- wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_wall_clock_elapsed_both(&w1, &w2);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
@@ -3495,9 +3801,9 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, 0);
+ (void) erts_run_queues_len(qszs, 0, 0, 1);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3523,7 +3829,12 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
BIF_RET(erts_error_logger_warnings);
}
-static erts_smp_atomic_t available_internal_state;
+static erts_atomic_t available_internal_state;
+
+static int empty_magic_ref_destructor(Binary *bin)
+{
+ return 1;
+}
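+/*
+ * The empty destructor above exists so that test references carry a
+ * recognizable destructor address (compared against in the "magic_ref"
+ * case further down); returning non-zero is assumed to tell the runtime
+ * that destruction is complete and the binary may be freed.
+ */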
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
@@ -3531,7 +3842,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
* NOTE: Only supposed to be used for testing, and debugging.
*/
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3563,7 +3874,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
- BIF_RET(make_small((Uint) words));
+        Eterm* hp = HAlloc(BIF_P, 3);
+ BIF_RET(TUPLE2(hp, make_small((Uint) words),
+ erts_ets_hash_sizeof_ext_segtab()));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
/* Used by driver_SUITE (emulator) */
@@ -3572,9 +3885,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
int no_errors;
ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
- erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
no_errors = erts_check_io_debug(&ciodi);
- erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
no_errors = 0;
#endif
@@ -3589,8 +3902,8 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
(Uint) ciodi.no_used_fds),
erts_bld_uint(hpp, szp,
(Uint) ciodi.no_driver_select_structs),
- erts_bld_uint(hpp, szp,
- (Uint) ciodi.no_driver_event_structs));
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_enif_select_structs));
if (hpp)
break;
hp = HAlloc(BIF_P, sz);
@@ -3605,7 +3918,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
Eterm res = NIL;
Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
- res = CONS(hp, pi_args[i], res);
+ res = CONS(hp, pi_args[i].name, res);
hp += 2;
}
BIF_RET(res);
@@ -3624,9 +3937,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
Uint n;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
n = erts_debug_nbalance();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(erts_make_integer(n, BIF_P));
}
else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
@@ -3641,11 +3954,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
Eterm res;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
@@ -3674,6 +3987,36 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_sint64_to_big(value, &hp));
}
}
+ else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
+ UWord size;
+ char c;
+ if (erts_is_above_stack_limit(&c))
+ size = erts_check_stack_recursion_downwards(&c);
+ else
+ size = erts_check_stack_recursion_upwards(&c);
+ if (IS_SSMALL(size))
+ BIF_RET(make_small(size));
+ else {
+ Uint hsz = BIG_UWORD_HEAP_SIZE(size);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(uword_to_big(size, hp));
+ }
+ } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
+#if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ }
+ else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ Eterm res = erts_lc_dump_graph();
+ BIF_RET(res);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
+
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3686,22 +4029,59 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_process_status(NULL, tp[2]));
}
}
+ else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
+ DistEntry *dep;
+ Eterm *hp, res;
+ Uint con_id, hsz = 0;
+ if (!is_atom(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ dep = erts_sysname_to_connected_dist_entry(tp[2]);
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+ erts_de_rlock(dep);
+ con_id = (Uint) dep->connection_id;
+ erts_de_runlock(dep);
+ (void) erts_bld_uint(NULL, &hsz, con_id);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, con_id);
+ BIF_RET(res);
+ }
else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
/* Used by erl_link_SUITE (emulator) */
if(is_internal_pid(tp[2])) {
+ erts_aint32_t state;
Eterm res;
Process *p;
+ int sigs_done, local_only;
p = erts_pid2proc(BIF_P,
ERTS_PROC_LOCK_MAIN,
tp[2],
- ERTS_PROC_LOCK_LINK);
+ ERTS_PROC_LOCK_MAIN);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+
+ local_only = 0;
+ do {
+ int reds = CONTEXT_REDS;
+ sigs_done = erts_proc_sig_handle_incoming(p,
+ &state,
+ &reds,
+ CONTEXT_REDS,
+ local_only);
+ local_only = !0;
+ } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
+
+ if (!(state & ERTS_PSFLG_EXITING))
+ res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
+ else if (BIF_P == p)
+ ERTS_BIF_EXITED(BIF_P);
+ else
+ res = am_undefined;
+ if (BIF_P != p)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
}
else if(is_internal_port(tp[2])) {
@@ -3712,20 +4092,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_PORT_SFLGS_INVALID_LOOKUP);
if(!p)
BIF_RET(am_undefined);
- res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
+ res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
erts_port_release(p);
BIF_RET(res);
}
else if(is_node_name_atom(tp[2])) {
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
- Eterm subres;
- erts_smp_de_links_lock(dep);
- subres = make_link_list(BIF_P, dep->nlinks, NIL);
- subres = make_link_list(BIF_P, dep->node_links, subres);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
- BIF_RET(subres);
+ Eterm res = NIL;
+ if (dep->mld) {
+ erts_mtx_lock(&dep->mld->mtx);
+ res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
+ erts_mtx_unlock(&dep->mld->mtx);
+ }
+ BIF_RET(res);
} else {
BIF_RET(am_undefined);
}
@@ -3734,28 +4114,54 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
/* Used by erl_link_SUITE (emulator) */
if(is_internal_pid(tp[2])) {
+ erts_aint32_t state;
Process *p;
Eterm res;
+ int sigs_done, local_only;
p = erts_pid2proc(BIF_P,
ERTS_PROC_LOCK_MAIN,
tp[2],
- ERTS_PROC_LOCK_LINK);
+ ERTS_PROC_LOCK_MAIN);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p));
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+
+ local_only = 0;
+ do {
+ int reds = CONTEXT_REDS;
+ sigs_done = erts_proc_sig_handle_incoming(p,
+ &state,
+ &reds,
+ CONTEXT_REDS,
+ local_only);
+ local_only = !0;
+ } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
+
+ if (!(state & ERTS_PSFLG_EXITING)) {
+ res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
+ res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
+ }
+ else {
+ if (BIF_P == p)
+ ERTS_BIF_EXITED(BIF_P);
+ else
+ res = am_undefined;
+ }
+ if (BIF_P != p)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if(is_node_name_atom(tp[2])) {
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
- Eterm ml;
- erts_smp_de_links_lock(dep);
- ml = make_monitor_list(BIF_P, dep->monitors);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
+ Eterm ml = NIL;
+ if (dep->mld) {
+ erts_mtx_lock(&dep->mld->mtx);
+ ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
+ ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
+ erts_mtx_unlock(&dep->mld->mtx);
+ }
BIF_RET(ml);
} else {
BIF_RET(am_undefined);
@@ -3770,22 +4176,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else {
Uint cno = dist_entry_channel_no(dep);
res = make_small(cno);
- erts_deref_dist_entry(dep);
}
BIF_RET(res);
}
- else if (ERTS_IS_ATOM_STR("have_pending_exit", tp[1])) {
- Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- tp[2], ERTS_PROC_LOCK_STATUS);
- if (!rp) {
- BIF_RET(am_undefined);
- }
- else {
- Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- BIF_RET(res);
- }
- }
else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
Eterm bin = tp[2];
if (is_binary(bin)) {
@@ -3826,21 +4219,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(res);
}
}
- else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) {
- Uint dflags = (DFLAG_EXTENDED_REFERENCES |
- DFLAG_EXTENDED_PIDS_PORTS |
- DFLAG_BIT_BINARIES);
+ else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
+ Uint dflags = (TERM_TO_BINARY_DFLAGS
+ & ~DFLAG_EXPORT_PTR_TAG
+ & ~DFLAG_BIT_BINARIES);
BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
}
- else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
+ else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
Eterm res = am_undefined;
DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
if (dep) {
- erts_smp_de_rlock(dep);
- if (is_internal_port(dep->cid))
+ erts_de_rlock(dep);
+ if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
res = dep->cid;
- erts_smp_de_runlock(dep);
- erts_deref_dist_entry(dep);
+ erts_de_runlock(dep);
}
BIF_RET(res);
}
@@ -3866,7 +4258,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
}
else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
- Uint hash = (Uint) make_internal_hash(tp[2]);
+ Uint hash = (Uint) make_internal_hash(tp[2], 0);
Uint hsz = 0;
Eterm* hp;
erts_bld_uint(NULL, &hsz, hash);
@@ -3880,10 +4272,38 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
while (ix >= atom_table_size()) {
char tmp[20];
erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
- erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
}
return make_atom(ix);
}
+ else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
+ Binary *bin;
+ UWord bin_addr, refc;
+ Eterm bin_addr_term, refc_term, test_type;
+ Uint sz;
+ Eterm *hp;
+ if (!is_internal_magic_ref(tp[2])) {
+ if (is_internal_ordinary_ref(tp[2])) {
+ ErtsORefThing *rtp;
+ rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
+ if (erts_is_ref_numbers_magic(rtp->num))
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+ }
+ bin = erts_magic_ref2bin(tp[2]);
+ refc = erts_refc_read(&bin->intern.refc, 1);
+ bin_addr = (UWord) bin;
+ sz = 4;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ erts_bld_uword(NULL, &sz, refc);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ refc_term = erts_bld_uword(&hp, NULL, refc);
+ test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
+ ? am_true : am_false);
+ BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
+ }
break;
}
@@ -3956,7 +4376,7 @@ BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-static erts_smp_atomic_t hipe_test_reschedule_flag;
+static erts_atomic_t hipe_test_reschedule_flag;
#if defined(VALGRIND) && defined(__GNUC__)
/* Force noinline for valgrind suppression */
@@ -3972,7 +4392,6 @@ static void broken_halt_test(Eterm bif_arg_2)
erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
-
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -3981,7 +4400,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
- erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
+ erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
@@ -3997,7 +4416,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(prev_on ? am_true : am_false);
}
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -4021,13 +4440,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (block)
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
while (erts_milli_sleep((long) ms) != 0);
if (block)
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -4036,9 +4455,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
while (erts_milli_sleep((long) ms) != 0);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -4086,61 +4505,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
erts_set_gc_state(BIF_P, enable);
BIF_RET(res);
}
- else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
- /* Used by signal_SUITE (emulator) */
-
- /* Testcases depend on the exit being received via
- a pending exit when the receiver is the same as
- the caller. */
- if (is_tuple(BIF_ARG_2)) {
- Eterm* tp = tuple_val(BIF_ARG_2);
- if (arityval(tp[0]) == 3
- && (is_pid(tp[1]) || is_port(tp[1]))
- && is_internal_pid(tp[2])) {
- int xres;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- tp[2], rp_locks);
- if (!rp) {
- DECL_AM(dead);
- BIF_RET(AM_dead);
- }
-
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
- xres = erts_send_exit_signal(NULL, /* NULL in order to
- force a pending exit
- when we send to our
- selves. */
- tp[1],
- rp,
- &rp_locks,
- tp[3],
- NIL,
- NULL,
- 0);
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- rp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
- erts_smp_proc_unlock(rp, rp_locks);
- if (xres > 1) {
- DECL_AM(message);
- BIF_RET(AM_message);
- }
- else if (xres == 0) {
- DECL_AM(unaffected);
- BIF_RET(AM_unaffected);
- }
- else {
- DECL_AM(exit);
- BIF_RET(AM_exit);
- }
- }
- }
- }
else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
if (is_tuple(BIF_ARG_2)) {
@@ -4187,14 +4551,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
BIF_RET(NIL);
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
@@ -4205,7 +4569,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
res = am_true;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
}
BIF_RET(res);
}
@@ -4222,39 +4586,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_false);
else {
Uint32 con_id;
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
con_id = dep->connection_id;
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
erts_kill_dist_connection(dep, con_id);
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
}
- else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
-#ifdef ERTS_SMP
- int old_use_opt, use_opt;
- switch (BIF_ARG_2) {
- case am_true:
- use_opt = 1;
- break;
- case am_false:
- use_opt = 0;
- break;
- default:
- BIF_ERROR(BIF_P, BADARG);
- }
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- old_use_opt = !erts_disable_proc_not_running_opt;
- erts_disable_proc_not_running_opt = !use_opt;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(old_use_opt ? am_true : am_false);
-#else
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#endif
- }
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
@@ -4281,9 +4619,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint64 msecs;
if (term_to_Sint64(BIF_ARG_2, &msecs)) {
/* Negative value restore original value... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_debug_test_node_tab_delayed_delete(msecs);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_ok);
}
}
@@ -4298,54 +4636,210 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
BIF_RET(am_ok);
}
+ else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
+ Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
+ UWord bin_addr = (UWord) bin;
+ Eterm bin_addr_term, magic_ref, res;
+ Eterm *hp;
+ Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
+ res = TUPLE2(hp, magic_ref, bin_addr_term);
+ BIF_RET(res);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
+ Sint64 size;
+ if (term_to_Sint64(BIF_ARG_2, &size)) {
+ Binary* refbin = erts_bin_drv_alloc_fnf(size);
+ if (!refbin)
+ BIF_RET(am_false);
+ sys_memset(refbin->orig_bytes, 0, size);
+ BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
+ HAlloc(BIF_P, PROC_BIN_SIZE),
+ refbin));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
+ BIF_RET(am_ok);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
}
BIF_ERROR(BIF_P, BADARG);
}
+static BIF_RETTYPE
+gather_histograms_helper(Process * c_p, Eterm arg_tuple,
+ int gather(Process *, int, int, int, UWord, Eterm))
+{
+ SWord hist_start, hist_width, sched_id;
+ int msg_count, alloc_num;
+ Eterm *args;
+
+    /* This is an internal BIF, so the error checking is mostly left to the
+     * Erlang code that calls it. */
+
+ ASSERT(is_tuple_arity(arg_tuple, 5));
+ args = tuple_val(arg_tuple);
+
+ for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
+ if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
+ break;
+ }
+ }
+
+ if (alloc_num > ERTS_ALC_A_MAX) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ sched_id = signed_val(args[2]);
+ hist_width = signed_val(args[3]);
+ hist_start = signed_val(args[4]);
+
+ if (sched_id < 0 || sched_id > erts_no_schedulers) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
+
+ BIF_RET(make_small(msg_count));
+}
+
+BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_alloc_histograms);
+}
+
+BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_carrier_info);
+}
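+/*
+ * Expected call shape from the Erlang side (a sketch; the argument order
+ * follows gather_histograms_helper() above):
+ *
+ *     erts_internal:gather_alloc_histograms(
+ *         {Allocator, SchedulerId, HistWidth, HistStart, Ref})
+ *
+ * where Allocator is an atom such as binary_alloc and Ref is presumably
+ * used to tag the reply messages counted in the returned msg_count.
+ */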
+
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+    /* info->location_count may increase between size calculation and term
+     * building, so we cap it at the value sampled in
+     * lcnt_build_sample_vector() below.
+     *
+     * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and a value may cross into bignum territory between the
+ * size calculation and the actual build. This function handles that by always
+ * assuming the worst case, so the final term needs to be fixed up with
+ * HRelease once it has been built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
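+/*
+ * Intended two-pass usage (a sketch following the erts_bld_* convention;
+ * hp_start is hypothetical bookkeeping for the HRelease fixup):
+ *
+ *     Uint size = 0;
+ *     (void) bld_unstable_uint64(NULL, &size, val);          // worst case
+ *     hp_start = hp = HAlloc(p, size);
+ *     term = bld_unstable_uint64((Uint **) &hp, NULL, val);  // build
+ *     HRelease(p, hp_start + size, hp);                      // trim slack
+ */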
+
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
unsigned int i;
-
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
-
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
- * { .. histogram .. }]
- */
+     *  [{{file, line},
+     *   {tries, colls, {seconds, nanoseconds, n_blocks}},
+     *   { .. histogram .. }] */
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
- vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
}
+
thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
@@ -4354,198 +4848,279 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+        /* Use registered names as ids for process locks if available. Thread
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, sys_strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
- res = erts_bld_cons( hpp, szp, t, res);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
- adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+
+ adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
- aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+ aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
return res;
-}
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ sys_strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
+
+ return res;
+}
+
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
+
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
+
+ data = erts_lcnt_get_data();
+
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
+
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
+
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
+
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
+
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
+
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
+
+ BIF_RET(result);
#endif
- }
+}
+
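The collect BIF above relies on the emulator's usual two-pass term building: lcnt_build_result_term is first called with a NULL heap pointer so it only accumulates the required word count, the heap is allocated with one HAlloc, and the same function then runs again to actually write the term, with HRelease handing back anything the sizing pass over-reserved. A minimal sketch of the idiom, assuming a stand-in Term type rather than the real Eterm encoding:

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned long Term;    /* stand-in for Eterm */

    /* Sizing pass: szp != NULL, hpp == NULL -> only count words.
     * Build pass:  hpp != NULL, szp == NULL -> only write words. */
    static Term build_pair(Term **hpp, size_t *szp, Term a, Term b) {
        if (szp != NULL) {
            *szp += 3;             /* header word plus two elements */
            return 0;              /* dummy; the sizing pass ignores values */
        }
        assert(hpp != NULL);
        Term *hp = *hpp;
        hp[0] = 2;                 /* "arity" header, illustrative encoding */
        hp[1] = a;
        hp[2] = b;
        *hpp += 3;
        return (Term)hp;           /* "boxed" pointer, illustrative */
    }

    /* usage: size_t sz = 0;            build_pair(NULL, &sz, 1, 2);
     *        Term heap[3], *hp = heap; build_pair(&hp,  NULL, 1, 2); */

Because both passes run the same code path, the computed size cannot drift from what the build pass writes; this is also why lcnt_pretty_print_lock_id may substitute NIL during the sizing pass, since every immediate occupies the same space.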
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- /* calculate size */
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ BIF_RET(am_false);
+ }
+#endif
+ BIF_ERROR(BIF_P, BADARG);
+}
- /* alloc and build */
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
- hp = HAlloc(BIF_P, hsize);
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
- erts_lcnt_clear_counters();
-
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* ptr = tuple_val(BIF_ARG_1);
-
- if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) {
- int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0;
- if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ category = lcnt_atom_to_lock_category(CAR(cell));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
- if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false;
- else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false;
-
-#ifdef ERTS_SMP
- if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(enable);
- } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(enable);
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
+
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
}
-#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
+
+ category_mask |= category;
+ categories = CDR(cell);
}
- }
-#endif
+ erts_lcnt_set_category_mask(category_mask);
+
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
+
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
static void os_info_init(void)
{
- Eterm type = erts_atom_put((byte *) os_type, strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
+ Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
Eterm flav;
int major, minor, build;
char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
Eterm* hp;
os_flavor(buf, 1024);
- flav = erts_atom_put((byte *) buf, strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
+ flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
erts_free(ERTS_ALC_T_TMP, (void *) buf);
hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
os_type_tuple = TUPLE2(hp, type, flav);
@@ -4563,13 +5138,13 @@ static void os_info_init(void)
void
erts_bif_info_init(void)
{
- erts_smp_atomic_init_nob(&available_internal_state, 0);
- erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);
+ erts_atomic_init_nob(&available_internal_state, 0);
+ erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);
alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
gather_sched_wall_time_res_trap
- = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
+ = erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
gather_gc_info_res_trap
= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
gather_io_bytes_trap
@@ -4578,6 +5153,10 @@ erts_bif_info_init(void)
= erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
gather_system_check_res_trap
= erts_export_put(am_erts_internal, am_gather_system_check_result, 1);
+
+ is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);
+
process_info_init();
os_info_init();
}
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 73d327da3e..aaf262780f 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,12 +29,13 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
-#include "erl_process.h"
-#include "error.h"
#include "bif.h"
+#include "erl_binary.h"
+
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+
static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
@@ -146,110 +147,732 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
-/*
- * erlang:'--'/2
- */
+/* erlang:'--'/2
+ *
+ * Subtracts a list from another (LHS -- RHS), removing the first occurrence of
+ * each element in RHS from LHS. There is no type coercion so the elements must
+ * match exactly.
+ *
+ * The BIF is broken into several stages that can all trap individually, and it
+ * chooses its algorithm based on input size. If either input is small it will
+ * use a linear scan tuned to which side it's on, and if both inputs are large
+ * enough it will convert RHS into a multiset to provide good asymptotic
+ * behavior. */
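For reference, [1,2,2,3] -- [2] yields [1,2,3]: each element of RHS cancels at most one matching element of LHS, and the survivors keep their order. A minimal reference model of these semantics in plain C, assuming int arrays in place of Erlang lists and ignoring reductions entirely (illustrative only, not the BIF's code path):

    #include <stdio.h>
    #include <stddef.h>

    /* Remove the first occurrence of each rhs element from lhs, in
     * place, and return the new length. O(n*m), like the naive stages. */
    static size_t list_subtract(int *lhs, size_t n, const int *rhs, size_t m) {
        for (size_t j = 0; j < m; j++) {
            for (size_t i = 0; i < n; i++) {
                if (lhs[i] == rhs[j]) {
                    for (size_t k = i + 1; k < n; k++)
                        lhs[k - 1] = lhs[k];   /* shift to keep order */
                    n--;
                    break;
                }
            }
        }
        return n;
    }

    int main(void) {
        int a[] = {1, 2, 2, 3};
        size_t n = list_subtract(a, 4, (int[]){2}, 1);
        for (size_t i = 0; i < n; i++)
            printf("%d ", a[i]);               /* prints: 1 2 3 */
        return 0;
    }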
-#define SMALL_VEC_SIZE 10
-static Eterm subtract(Process* p, Eterm A, Eterm B)
-{
- Eterm list;
- Eterm* hp;
- Uint need;
- Eterm res;
- Eterm small_vec[SMALL_VEC_SIZE]; /* Preallocated memory for small lists */
- Eterm* vec_p;
- Eterm* vp;
- Sint i;
- Sint n;
- Sint m;
-
- if ((n = erts_list_length(A)) < 0) {
- BIF_ERROR(p, BADARG);
+#define SUBTRACT_LHS_THRESHOLD 16
+#define SUBTRACT_RHS_THRESHOLD 16
+
+typedef enum {
+ SUBTRACT_STAGE_START,
+ SUBTRACT_STAGE_LEN_LHS,
+
+ /* Naive linear scan that's efficient when
+ * LEN_LHS <= SUBTRACT_LHS_THRESHOLD. */
+ SUBTRACT_STAGE_NAIVE_LHS,
+
+ SUBTRACT_STAGE_LEN_RHS,
+
+ /* As SUBTRACT_STAGE_NAIVE_LHS but for RHS. */
+ SUBTRACT_STAGE_NAIVE_RHS,
+
+ /* Creates a multiset from RHS for faster lookups before sweeping through
+ * LHS. The set is implemented as a red-black tree and duplicate elements
+ * are handled by a counter on each node. */
+ SUBTRACT_STAGE_SET_BUILD,
+ SUBTRACT_STAGE_SET_FINISH
+} ErtsSubtractCtxStage;
+
+typedef struct subtract_node__ {
+ struct subtract_node__ *parent;
+ struct subtract_node__ *left;
+ struct subtract_node__ *right;
+ int is_red;
+
+ Eterm key;
+ Uint count;
+} subtract_tree_t;
+
+typedef struct {
+ ErtsSubtractCtxStage stage;
+
+ Eterm lhs_original;
+ Eterm rhs_original;
+
+ Uint lhs_remaining;
+ Uint rhs_remaining;
+
+ Eterm iterator;
+
+ Eterm *result_cdr;
+ Eterm result;
+
+ union {
+ Eterm lhs_elements[SUBTRACT_LHS_THRESHOLD];
+ Eterm rhs_elements[SUBTRACT_RHS_THRESHOLD];
+
+ struct {
+ subtract_tree_t *tree;
+
+ /* A memory area for the tree's nodes, saving us the need to have
+ * one allocation per node. */
+ subtract_tree_t *alloc_start;
+ subtract_tree_t *alloc;
+ } rhs_set;
+ } u;
+} ErtsSubtractContext;
+
+#define ERTS_RBT_PREFIX subtract
+#define ERTS_RBT_T subtract_tree_t
+#define ERTS_RBT_KEY_T Eterm
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->parent = NULL; \
+ (T)->left = NULL; \
+ (T)->right = NULL; \
+ } while(0)
+#define ERTS_RBT_IS_RED(T) ((T)->is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
+#define ERTS_RBT_SET_BLACK(T) ((T)->is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->is_red = F)
+#define ERTS_RBT_GET_PARENT(T) ((T)->parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = P)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->key)
+#define ERTS_RBT_CMP_KEYS(KX, KY) CMP_TERM(KX, KY)
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context);
+
+static void subtract_ctx_dtor(ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_SET_BUILD:
+ case SUBTRACT_STAGE_SET_FINISH:
+ erts_free(ERTS_ALC_T_LIST_TRAP, context->u.rhs_set.alloc_start);
+ break;
+ default:
+ break;
}
- if ((m = erts_list_length(B)) < 0) {
- BIF_ERROR(p, BADARG);
+}
+
+static int subtract_ctx_bin_dtor(Binary *context_bin) {
+ ErtsSubtractContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
+ subtract_ctx_dtor(context);
+ return 1;
+}
+
+static void subtract_ctx_move(ErtsSubtractContext *from,
+ ErtsSubtractContext *to) {
+ int uses_result_cdr = 0;
+
+ to->stage = from->stage;
+
+ to->lhs_original = from->lhs_original;
+ to->rhs_original = from->rhs_original;
+
+ to->lhs_remaining = from->lhs_remaining;
+ to->rhs_remaining = from->rhs_remaining;
+
+ to->iterator = from->iterator;
+ to->result = from->result;
+
+ switch (to->stage) {
+ case SUBTRACT_STAGE_NAIVE_LHS:
+ sys_memcpy(to->u.lhs_elements,
+ from->u.lhs_elements,
+ sizeof(Eterm) * to->lhs_remaining);
+ break;
+ case SUBTRACT_STAGE_NAIVE_RHS:
+ sys_memcpy(to->u.rhs_elements,
+ from->u.rhs_elements,
+ sizeof(Eterm) * to->rhs_remaining);
+
+ uses_result_cdr = 1;
+ break;
+ case SUBTRACT_STAGE_SET_FINISH:
+ uses_result_cdr = 1;
+ /* FALL THROUGH */
+ case SUBTRACT_STAGE_SET_BUILD:
+ to->u.rhs_set.alloc_start = from->u.rhs_set.alloc_start;
+ to->u.rhs_set.alloc = from->u.rhs_set.alloc;
+ to->u.rhs_set.tree = from->u.rhs_set.tree;
+ break;
+ default:
+ break;
}
-
- if (n == 0)
- BIF_RET(NIL);
- if (m == 0)
- BIF_RET(A);
-
- /* allocate element vector */
- if (n <= SMALL_VEC_SIZE)
- vec_p = small_vec;
- else
- vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
-
- /* PUT ALL ELEMENTS IN VP */
- vp = vec_p;
- list = A;
- i = n;
- while(i--) {
- Eterm* listp = list_val(list);
- *vp++ = CAR(listp);
- list = CDR(listp);
+
+ if (uses_result_cdr) {
+ if (from->result_cdr == &from->result) {
+ to->result_cdr = &to->result;
+ } else {
+ to->result_cdr = from->result_cdr;
+ }
}
-
- /* UNMARK ALL DELETED CELLS */
- list = B;
- m = 0; /* number of deleted elements */
- while(is_list(list)) {
- Eterm* listp = list_val(list);
- Eterm elem = CAR(listp);
- i = n;
- vp = vec_p;
- while(i--) {
- if (is_value(*vp) && eq(*vp, elem)) {
- *vp = THE_NON_VALUE;
- m++;
- break;
- }
- vp++;
- }
- list = CDR(listp);
+}
+
+static Eterm subtract_create_trap_state(Process *p,
+ ErtsSubtractContext *context) {
+ Binary *state_bin;
+ Eterm *hp;
+
+ state_bin = erts_create_magic_binary(sizeof(ErtsSubtractContext),
+ subtract_ctx_bin_dtor);
+
+ subtract_ctx_move(context, ERTS_MAGIC_BIN_DATA(state_bin));
+
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+
+ return erts_mk_magic_ref(&hp, &MSO(p), state_bin);
+}
+
+static int subtract_enter_len_lhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_LHS;
+
+ context->iterator = context->lhs_original;
+ context->lhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_enter_len_rhs(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_LEN_RHS;
+
+ context->iterator = context->rhs_original;
+ context->rhs_remaining = 0;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_get_length(Process *p, Eterm *iterator_p, Uint *count_p) {
+ static const Sint ELEMENTS_PER_RED = 32;
+
+ Sint budget, count;
+ Eterm iterator;
+
+ budget = ELEMENTS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ iterator = *iterator_p;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ for (count = 0; count < budget && is_list(iterator); count++) {
+ iterator = CDR(list_val(iterator));
}
-
- if (m == n) /* All deleted ? */
- res = NIL;
- else if (m == 0) /* None deleted ? */
- res = A;
- else { /* REBUILD LIST */
- res = NIL;
- need = 2*(n - m);
- hp = HAlloc(p, need);
- vp = vec_p + n - 1;
- while(vp >= vec_p) {
- if (is_value(*vp)) {
- res = CONS(hp, *vp, res);
- hp += 2;
- }
- vp--;
- }
+
+ if (!is_list(iterator) && !is_nil(iterator)) {
+ return -1;
+ }
+
+ BUMP_REDS(p, count / ELEMENTS_PER_RED);
+
+ *iterator_p = iterator;
+ *count_p += count;
+
+ if (is_nil(iterator)) {
+ return 1;
}
- if (vec_p != small_vec)
- erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
- BIF_RET(res);
+
+ return 0;
}
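subtract_get_length shows the reduction-budget pattern used by every stage here: scale the process's remaining reductions into loop iterations, do at most that much work, report what was actually spent with BUMP_REDS, and signal the caller to trap (0), finish (1), or fail on an improper list (-1). A self-contained sketch of the same shape, assuming a trivial array walk in place of list cells and folding the error case away:

    #include <stddef.h>

    enum { ELEMENTS_PER_RED = 32 };

    struct walk_ctx {
        const int *pos, *end;  /* remaining input */
        long reds_left;        /* budget granted by the scheduler */
    };

    /* 1 = finished, 0 = budget exhausted; resume by calling again. */
    static int walk_some(struct walk_ctx *c) {
        long budget = ELEMENTS_PER_RED * c->reds_left;
        long count = 0;

        while (count < budget && c->pos < c->end) {
            c->pos++;                          /* one unit of work */
            count++;
        }

        c->reds_left -= count / ELEMENTS_PER_RED;  /* cf. BUMP_REDS */
        return c->pos == c->end;
    }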
-BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_enter_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_LHS;
+
+ context->iterator = context->rhs_original;
+ context->result = NIL;
+
+ iterator = context->lhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_LHS_THRESHOLD);
+
+ context->u.lhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->lhs_remaining);
+
+ return subtract_continue(p, context);
}
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
-{
- return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+static int subtract_naive_lhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = 0; found_at < context->lhs_remaining; found_at++) {
+ if (EQ(value, context->u.lhs_elements[found_at])) {
+ /* We shift the array one step down as we have to preserve
+ * order.
+ *
+ * Note that we can't exit early as that would suppress errors
+ * in the right-hand side (this runs prior to determining the
+ * length of RHS). */
+
+ context->lhs_remaining--;
+ sys_memmove(&context->u.lhs_elements[found_at],
+ &context->u.lhs_elements[found_at + 1],
+ (context->lhs_remaining - found_at) * sizeof(Eterm));
+ break;
+ }
+ }
+
+ checks += MAX(1, context->lhs_remaining);
+ context->iterator = next;
+ }
+
+ BUMP_REDS(p, MIN(checks, budget) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ return 0;
+ } else if (!is_nil(context->iterator)) {
+ return -1;
+ }
+
+ if (context->lhs_remaining > 0) {
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(p, context->lhs_remaining * 2);
+
+ for (i = context->lhs_remaining - 1; i >= 0; i--) {
+ Eterm value = context->u.lhs_elements[i];
+
+ context->result = CONS(hp, value, context->result);
+ hp += 2;
+ }
+ }
+
+ ASSERT(context->lhs_remaining > 0 || context->result == NIL);
+
+ return 1;
+}
+
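The memmove in subtract_naive_lhs is the classic order-preserving array delete: close the gap by shifting the tail down one slot. In isolation:

    #include <string.h>

    /* Delete v[found_at] from v[0..len), preserving element order;
     * returns the new length. */
    static size_t remove_at(int *v, size_t len, size_t found_at) {
        len--;
        memmove(&v[found_at], &v[found_at + 1],
                (len - found_at) * sizeof v[0]);
        return len;
    }

LHS order must be preserved here because it is the result; contrast the RHS scan below, which may freely swap a matched element with the last one since RHS order is never observable.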
+static int subtract_enter_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ Eterm iterator;
+ int i = 0;
+
+ context->stage = SUBTRACT_STAGE_NAIVE_RHS;
+
+ context->iterator = context->lhs_original;
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ iterator = context->rhs_original;
+
+ while (is_list(iterator)) {
+ const Eterm *cell = list_val(iterator);
+
+ ASSERT(i < SUBTRACT_RHS_THRESHOLD);
+
+ context->u.rhs_elements[i++] = CAR(cell);
+ iterator = CDR(cell);
+ }
+
+ ASSERT(i == context->rhs_remaining);
+
+ return subtract_continue(p, context);
}
+static int subtract_naive_rhs(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 16;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ const Eterm *cell;
+ Eterm value, next;
+ int found_at;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ for (found_at = context->rhs_remaining - 1; found_at >= 0; found_at--) {
+ if (EQ(value, context->u.rhs_elements[found_at])) {
+ break;
+ }
+ }
+
+ if (found_at < 0) {
+ /* Destructively add the value to the result. This is safe
+ * since the GC is disabled and the unfinished term is never
+ * leaked to the outside world. */
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ Eterm swap;
+
+ if (context->rhs_remaining-- == 1) {
+ /* We've run out of items to remove, so the rest of the
+ * result will be equal to the remainder of the input. We know
+ * that LHS is well-formed as any errors would've been reported
+ * during length determination. */
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ swap = context->u.rhs_elements[context->rhs_remaining];
+ context->u.rhs_elements[found_at] = swap;
+ }
+
+ checks += context->rhs_remaining;
+ context->iterator = next;
+ context->lhs_remaining--;
+ }
+
+ /* The result only has to be terminated when returning it to the user, but
+ * we're doing it when trapping as well to prevent headaches when
+ * debugging. */
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, MIN(budget, checks) / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
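subtract_naive_rhs builds its result with a tail pointer: result_cdr always addresses the CDR of the last cell written, so each surviving element is appended in O(1) and in input order, and the list is capped with NIL (or spliced onto the untouched remainder) at the end. The same technique with an ordinary linked list, as a sketch with error handling omitted:

    #include <stdlib.h>

    struct cell { int car; struct cell *cdr; };

    static struct cell *copy_values(const int *v, size_t n) {
        struct cell *head = NULL;
        struct cell **tailp = &head;   /* plays the role of result_cdr */

        for (size_t i = 0; i < n; i++) {
            struct cell *c = malloc(sizeof *c);
            c->car = v[i];
            *tailp = c;                /* link in after the current tail */
            tailp = &c->cdr;           /* advance the tail pointer */
        }
        *tailp = NULL;                 /* cf. *context->result_cdr = NIL */
        return head;
    }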
+static int subtract_enter_set_build(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_BUILD;
+
+ context->u.rhs_set.alloc_start =
+ erts_alloc(ERTS_ALC_T_LIST_TRAP,
+ context->rhs_remaining * sizeof(subtract_tree_t));
+
+ context->u.rhs_set.alloc = context->u.rhs_set.alloc_start;
+ context->u.rhs_set.tree = NULL;
+
+ context->iterator = context->rhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_build(Process *p, ErtsSubtractContext *context) {
+ static const Sint INSERTIONS_PER_RED = 16;
+ Sint budget, insertions;
+
+ budget = INSERTIONS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ insertions = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (insertions < budget && is_list(context->iterator)) {
+ subtract_tree_t *existing_node, *new_node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ new_node = context->u.rhs_set.alloc;
+ new_node->key = value;
+ new_node->count = 1;
+
+ existing_node = subtract_rbt_lookup_insert(&context->u.rhs_set.tree,
+ new_node);
+
+ if (existing_node != NULL) {
+ existing_node->count++;
+ } else {
+ context->u.rhs_set.alloc++;
+ }
+
+ context->iterator = next;
+ insertions++;
+ }
+
+ BUMP_REDS(p, insertions / INSERTIONS_PER_RED);
+
+ ASSERT(is_list(context->iterator) || is_nil(context->iterator));
+ ASSERT(context->u.rhs_set.tree != NULL);
+
+ return is_nil(context->iterator);
+}
+
+static int subtract_enter_set_finish(Process *p, ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_SET_FINISH;
+
+ context->result_cdr = &context->result;
+ context->result = NIL;
+
+ context->iterator = context->lhs_original;
+
+ return subtract_continue(p, context);
+}
+
+static int subtract_set_finish(Process *p, ErtsSubtractContext *context) {
+ const Sint CHECKS_PER_RED = 8;
+ Sint checks, budget;
+
+ budget = CHECKS_PER_RED * ERTS_BIF_REDS_LEFT(p);
+ checks = 0;
+
+#ifdef DEBUG
+ budget = budget / 10 + 1;
+#endif
+
+ while (checks < budget && is_list(context->iterator)) {
+ subtract_tree_t *node;
+ const Eterm *cell;
+ Eterm value, next;
+
+ cell = list_val(context->iterator);
+ value = CAR(cell);
+ next = CDR(cell);
+
+ ASSERT(context->rhs_remaining > 0);
+
+ node = subtract_rbt_lookup(context->u.rhs_set.tree, value);
+
+ if (node == NULL) {
+ Eterm *hp = HAllocX(p, 2, context->lhs_remaining * 2);
+
+ *context->result_cdr = make_list(hp);
+ context->result_cdr = &CDR(hp);
+
+ CAR(hp) = value;
+ } else {
+ if (context->rhs_remaining-- == 1) {
+ *context->result_cdr = next;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ return 1;
+ }
+
+ if (node->count-- == 1) {
+ subtract_rbt_delete(&context->u.rhs_set.tree, node);
+ }
+ }
+
+ context->iterator = next;
+ context->lhs_remaining--;
+ checks++;
+ }
+
+ *context->result_cdr = NIL;
+
+ BUMP_REDS(p, checks / CHECKS_PER_RED);
+
+ if (is_list(context->iterator)) {
+ ASSERT(context->lhs_remaining > 0 && context->rhs_remaining > 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int subtract_continue(Process *p, ErtsSubtractContext *context) {
+ switch (context->stage) {
+ case SUBTRACT_STAGE_START: {
+ return subtract_enter_len_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_LHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->lhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ if (context->lhs_remaining <= SUBTRACT_LHS_THRESHOLD) {
+ return subtract_enter_naive_lhs(p, context);
+ }
+
+ return subtract_enter_len_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_LHS: {
+ return subtract_naive_lhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_LEN_RHS: {
+ int res = subtract_get_length(p,
+ &context->iterator,
+ &context->rhs_remaining);
+
+ if (res != 1) {
+ return res;
+ }
+
+ /* We've walked through both lists fully now so we no longer need
+ * to check for errors past this point. */
+
+ if (context->rhs_remaining <= SUBTRACT_RHS_THRESHOLD) {
+ return subtract_enter_naive_rhs(p, context);
+ }
+
+ return subtract_enter_set_build(p, context);
+ }
+
+ case SUBTRACT_STAGE_NAIVE_RHS: {
+ return subtract_naive_rhs(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_BUILD: {
+ int res = subtract_set_build(p, context);
+
+ if (res != 1) {
+ return res;
+ }
+
+ return subtract_enter_set_finish(p, context);
+ }
+
+ case SUBTRACT_STAGE_SET_FINISH: {
+ return subtract_set_finish(p, context);
+ }
+
+ default:
+ ERTS_ASSERT(!"unreachable");
+ }
+}
+
+static int subtract_start(Process *p, Eterm lhs, Eterm rhs,
+ ErtsSubtractContext *context) {
+ context->stage = SUBTRACT_STAGE_START;
+
+ context->lhs_original = lhs;
+ context->rhs_original = rhs;
+
+ return subtract_continue(p, context);
+}
+
+/* erlang:'--'/2 */
+static Eterm subtract(Export *bif_entry, BIF_ALIST_2) {
+ Eterm lhs = BIF_ARG_1, rhs = BIF_ARG_2;
+
+ if ((is_list(lhs) || is_nil(lhs)) && (is_list(rhs) || is_nil(rhs))) {
+ /* We start with the context on the stack in the hopes that we won't
+ * have to trap. */
+ ErtsSubtractContext context;
+ int res;
+
+ res = subtract_start(BIF_P, lhs, rhs, &context);
+
+ if (res == 0) {
+ Eterm state_mref;
+
+ state_mref = subtract_create_trap_state(BIF_P, &context);
+ erts_set_gc_state(BIF_P, 0);
+
+ BIF_TRAP2(bif_entry, BIF_P, state_mref, NIL);
+ }
+
+ subtract_ctx_dtor(&context);
+
+ if (res < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ BIF_RET(context.result);
+ } else if (is_internal_magic_ref(lhs)) {
+ ErtsSubtractContext *context;
+ int (*dtor)(Binary*);
+ Binary *magic_bin;
+
+ int res;
+
+ magic_bin = erts_magic_ref2bin(lhs);
+ dtor = ERTS_MAGIC_BIN_DESTRUCTOR(magic_bin);
+
+ if (dtor != subtract_ctx_bin_dtor) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+ ASSERT(rhs == NIL);
+
+ context = ERTS_MAGIC_BIN_DATA(magic_bin);
+ res = subtract_continue(BIF_P, context);
+
+ if (res == 0) {
+ BIF_TRAP2(bif_entry, BIF_P, lhs, NIL);
+ }
+
+ erts_set_gc_state(BIF_P, 1);
+
+ if (res < 0) {
+ ERTS_BIF_ERROR_TRAPPED2(BIF_P, BADARG, bif_entry,
+ context->lhs_original,
+ context->rhs_original);
+ }
+
+ BIF_RET(context->result);
+ }
+
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_ebif_minusminus_2], BIF_CALL_ARGS);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2) {
+ return subtract(bif_export[BIF_subtract_2], BIF_CALL_ARGS);
+}
+
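Taken together, subtract and its two entry points implement a re-entry protocol: the first call carries the real arguments and keeps the context on the C stack; if any stage runs out of reductions, the context is moved into a magic binary, GC is disabled so the half-built result may stay unterminated, and the BIF traps to itself with the magic ref as its first argument until subtract_continue reports completion. A compressed model of that control flow, assuming plain malloc in place of magic binaries and with the scheduler round-trip elided:

    #include <stdlib.h>
    #include <string.h>

    struct ctx { int step; /* iterator, partial result, ... */ };

    /* One bounded slice of work; 1 = finished, 0 = must trap. */
    static int work_continue(struct ctx *c) {
        c->step++;
        return c->step >= 3;
    }

    static void run(void) {
        struct ctx stack_ctx = {0};
        struct ctx *c = &stack_ctx;

        while (!work_continue(c)) {
            if (c == &stack_ctx) {     /* first trap: escape the C stack */
                c = malloc(sizeof *c);
                memcpy(c, &stack_ctx, sizeof *c);
            }
            /* the real BIF returns to the scheduler here (BIF_TRAP2)
             * and is later re-entered with the magic ref */
        }

        if (c != &stack_ctx)
            free(c);                   /* cf. subtract_ctx_bin_dtor */
    }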
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
Eterm list;
Eterm item;
int non_immed_key;
- int max_iter = 10 * CONTEXT_REDS;
+ int reds_left = ERTS_BIF_REDS_LEFT(BIF_P);
+ int max_iter = 16 * reds_left;
if (is_nil(BIF_ARG_2)) {
BIF_RET(am_false);
@@ -267,85 +890,138 @@ BIF_RETTYPE lists_member_2(BIF_ALIST_2)
}
item = CAR(list_val(list));
if ((item == term) || (non_immed_key && eq(item, term))) {
- BIF_RET2(am_true, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_true, reds_left - max_iter/16);
}
list = CDR(list_val(list));
}
if (is_not_nil(list)) {
+ BUMP_REDS(BIF_P, reds_left - max_iter/16);
BIF_ERROR(BIF_P, BADARG);
}
- BIF_RET2(am_false, CONTEXT_REDS - max_iter/10);
+ BIF_RET2(am_false, reds_left - max_iter/16);
}
-BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
+static BIF_RETTYPE lists_reverse_alloc(Process *c_p,
+ Eterm list_in,
+ Eterm tail_in)
{
- Eterm list;
- Eterm tmp_list;
- Eterm result;
- Eterm* hp;
- Uint n;
- int max_iter;
-
- /*
- * Handle legal and illegal non-lists quickly.
- */
- if (is_nil(BIF_ARG_1)) {
- BIF_RET(BIF_ARG_2);
- } else if (is_not_list(BIF_ARG_1)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ static const Uint CELLS_PER_RED = 40;
+
+ Eterm *alloc_top, *alloc_end;
+ Uint cells_left, max_cells;
+ Eterm list, tail;
+ Eterm lookahead;
+
+ list = list_in;
+ tail = tail_in;
+
+ cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+ lookahead = list;
+
+ while (cells_left != 0 && is_list(lookahead)) {
+ lookahead = CDR(list_val(lookahead));
+ cells_left--;
+ }
+
+ BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
+
+ if (is_not_list(lookahead) && is_not_nil(lookahead)) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ alloc_top = HAlloc(c_p, 2 * (max_cells - cells_left));
+ alloc_end = alloc_top + 2 * (max_cells - cells_left);
+
+ while (alloc_top < alloc_end) {
+ Eterm *pair = list_val(list);
+
+ tail = CONS(alloc_top, CAR(pair), tail);
+ list = CDR(pair);
+
+ ASSERT(is_list(list) || is_nil(list));
+
+ alloc_top += 2;
}
- /*
- * First use the rest of the remaning heap space.
- */
- list = BIF_ARG_1;
- result = BIF_ARG_2;
- hp = HEAP_TOP(BIF_P);
- n = HeapWordsLeft(BIF_P) / 2;
- while (n != 0 && is_list(list)) {
- Eterm* pair = list_val(list);
- result = CONS(hp, CAR(pair), result);
- list = CDR(pair);
- hp += 2;
- n--;
- }
- HEAP_TOP(BIF_P) = hp;
if (is_nil(list)) {
- BIF_RET(result);
- }
-
- /*
- * Calculate length of remaining list (up to a suitable limit).
- */
- max_iter = CONTEXT_REDS * 40;
- n = 0;
- tmp_list = list;
- while (max_iter-- > 0 && is_list(tmp_list)) {
- tmp_list = CDR(list_val(tmp_list));
- n++;
- }
- if (is_not_nil(tmp_list) && is_not_list(tmp_list)) {
- goto error;
- }
-
- /*
- * Now do one HAlloc() and continue reversing.
- */
- hp = HAlloc(BIF_P, 2*n);
- while (n != 0 && is_list(list)) {
- Eterm* pair = list_val(list);
- result = CONS(hp, CAR(pair), result);
- list = CDR(pair);
- hp += 2;
- n--;
+ BIF_RET(tail);
+ }
+
+ ASSERT(is_list(tail) && cells_left == 0);
+ BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
+}
+
+static BIF_RETTYPE lists_reverse_onheap(Process *c_p,
+ Eterm list_in,
+ Eterm tail_in)
+{
+ static const Uint CELLS_PER_RED = 60;
+
+ Eterm *alloc_start, *alloc_top, *alloc_end;
+ Uint cells_left, max_cells;
+ Eterm list, tail;
+
+ list = list_in;
+ tail = tail_in;
+
+ cells_left = max_cells = CELLS_PER_RED * (1 + ERTS_BIF_REDS_LEFT(c_p));
+
+ ASSERT(HEAP_LIMIT(c_p) >= HEAP_TOP(c_p) + 2);
+ alloc_start = HEAP_TOP(c_p);
+ alloc_end = HEAP_LIMIT(c_p) - 2;
+ alloc_top = alloc_start;
+
+ /* Don't process more cells than we have reductions for. */
+ alloc_end = MIN(alloc_top + (cells_left * 2), alloc_end);
+
+ while (alloc_top < alloc_end && is_list(list)) {
+ Eterm *pair = list_val(list);
+
+ tail = CONS(alloc_top, CAR(pair), tail);
+ list = CDR(pair);
+
+ alloc_top += 2;
}
+
+ cells_left -= (alloc_top - alloc_start) / 2;
+ HEAP_TOP(c_p) = alloc_top;
+
+ ASSERT(cells_left >= 0 && cells_left <= max_cells);
+ BUMP_REDS(c_p, (max_cells - cells_left) / CELLS_PER_RED);
+
if (is_nil(list)) {
- BIF_RET(result);
- } else {
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP2(bif_export[BIF_lists_reverse_2], BIF_P, list, result);
+ BIF_RET(tail);
+ } else if (is_list(list)) {
+ ASSERT(is_list(tail));
+
+ if (cells_left > CELLS_PER_RED) {
+ return lists_reverse_alloc(c_p, list, tail);
+ }
+
+ BUMP_ALL_REDS(c_p);
+ BIF_TRAP2(bif_export[BIF_lists_reverse_2], c_p, list, tail);
}
+
+ BIF_ERROR(c_p, BADARG);
+}
+
+BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
+{
+ /* Handle legal and illegal non-lists quickly. */
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_2);
+ } else if (is_not_list(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /* We build the reversal on the unused part of the heap if possible to save
+ * us the trouble of having to figure out the list size. We fall back to
+ * lists_reverse_alloc when we run out of space. */
+ if (HeapWordsLeft(BIF_P) > 8) {
+ return lists_reverse_onheap(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ return lists_reverse_alloc(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
BIF_RETTYPE
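lists_reverse_alloc is the count-then-allocate half of the strategy: one bounded lookahead walk both validates the tail and yields an exact cell count, so a single HAlloc suffices; the onheap variant instead consumes whatever free words sit between HEAP_TOP and HEAP_LIMIT and skips the sizing walk altogether. The first half in miniature, assuming a plain linked list:

    #include <stdlib.h>

    struct cell { int car; struct cell *cdr; };

    /* Reverse a proper list into a freshly allocated array. */
    static int *reverse_to_array(const struct cell *list, size_t *n_out) {
        size_t n = 0;
        for (const struct cell *p = list; p != NULL; p = p->cdr)
            n++;                       /* sizing walk (cf. lookahead) */

        int *out = malloc(n * sizeof *out);
        if (out == NULL)
            return NULL;

        size_t i = n;
        for (const struct cell *p = list; p != NULL; p = p->cdr)
            out[--i] = p->car;         /* fill back to front */

        *n_out = n;
        return out;
    }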
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index aecb8bf0c1..a594ec1493 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -260,7 +260,7 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
} else if (is_export(arg1)) {
Export* exp = (Export *) (export_val(arg1)[1]);
- if (exp->code[2] == (Uint) arity) {
+ if (exp->info.mfa.arity == (Uint) arity) {
BIF_RET(am_true);
}
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 46777d3aa5..ce2b27409b 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@
#include "big.h"
#include "dist.h"
#include "erl_version.h"
+#include "erl_osenv.h"
/*
* Return the pid for the Erlang process in the host OS.
@@ -65,141 +66,90 @@ BIF_RETTYPE os_getpid_0(BIF_ALIST_0)
BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
}
-BIF_RETTYPE os_getenv_0(BIF_ALIST_0)
+static void os_getenv_foreach(Process *process, Eterm *result, Eterm key, Eterm value)
{
- GETENV_STATE state;
- char *cp;
- Eterm* hp;
- Eterm ret;
- Eterm str;
+ Eterm kvp_term, *hp;
- init_getenv_state(&state);
+ hp = HAlloc(process, 5);
+ kvp_term = TUPLE2(hp, key, value);
+ hp += 3;
- ret = NIL;
- while ((cp = getenv_string(&state)) != NULL) {
- str = erts_convert_native_to_filename(BIF_P,(byte *)cp);
- hp = HAlloc(BIF_P, 2);
- ret = CONS(hp, str, ret);
- }
+ (*result) = CONS(hp, kvp_term, (*result));
+}
- fini_getenv_state(&state);
+BIF_RETTYPE os_list_env_vars_0(BIF_ALIST_0)
+{
+ const erts_osenv_t *global_env;
+ Eterm result = NIL;
- return ret;
+ global_env = erts_sys_rlock_global_osenv();
+ erts_osenv_foreach_term(global_env, BIF_P, &result, (void*)&os_getenv_foreach);
+ erts_sys_runlock_global_osenv();
+
+ return result;
}
-#define STATIC_BUF_SIZE 1024
-BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
+BIF_RETTYPE os_get_env_var_1(BIF_ALIST_1)
{
- Process* p = BIF_P;
- Eterm str;
- Sint len;
- int res;
- char *key_str, *val;
- char buf[STATIC_BUF_SIZE];
- size_t val_size = sizeof(buf);
-
- key_str = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
- ERTS_ALC_T_TMP,1,0,&len);
-
- if (!key_str) {
- BIF_ERROR(p, BADARG);
- }
+ const erts_osenv_t *global_env;
+ Eterm out_term;
+ int error;
- if (key_str != &buf[0])
- val = &buf[0];
- else {
- /* len includes zero byte */
- val_size -= len;
- val = &buf[len];
- }
- res = erts_sys_getenv(key_str, val, &val_size);
-
- if (res < 0) {
- no_var:
- str = am_false;
- } else {
- if (res > 0) {
- val = erts_alloc(ERTS_ALC_T_TMP, val_size);
- while (1) {
- res = erts_sys_getenv(key_str, val, &val_size);
- if (res == 0)
- break;
- else if (res < 0)
- goto no_var;
- else
- val = erts_realloc(ERTS_ALC_T_TMP, val, val_size);
- }
- }
- str = erts_convert_native_to_filename(p,(byte *)val);
- }
- if (key_str != &buf[0])
- erts_free(ERTS_ALC_T_TMP, key_str);
- if (val < &buf[0] || &buf[sizeof(buf)-1] < val)
- erts_free(ERTS_ALC_T_TMP, val);
- BIF_RET(str);
+ global_env = erts_sys_rlock_global_osenv();
+ error = erts_osenv_get_term(global_env, BIF_P, BIF_ARG_1, &out_term);
+ erts_sys_runlock_global_osenv();
+
+ if (error == 0) {
+ return am_false;
+ } else if (error < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ return out_term;
}
-BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
+BIF_RETTYPE os_set_env_var_2(BIF_ALIST_2)
{
- char def_buf_key[STATIC_BUF_SIZE];
- char def_buf_value[STATIC_BUF_SIZE];
- char *key_buf, *value_buf;
-
- key_buf = erts_convert_filename_to_native(BIF_ARG_1,def_buf_key,
- STATIC_BUF_SIZE,
- ERTS_ALC_T_TMP,0,0,NULL);
- if (!key_buf) {
- BIF_ERROR(BIF_P, BADARG);
- }
- value_buf = erts_convert_filename_to_native(BIF_ARG_2,def_buf_value,
- STATIC_BUF_SIZE,
- ERTS_ALC_T_TMP,1,0,
- NULL);
- if (!value_buf) {
- if (key_buf != def_buf_key) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
-
- if (erts_sys_putenv(key_buf, value_buf)) {
- if (key_buf != def_buf_key) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- if (value_buf != def_buf_value) {
- erts_free(ERTS_ALC_T_TMP, value_buf);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
- if (key_buf != def_buf_key) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- if (value_buf != def_buf_value) {
- erts_free(ERTS_ALC_T_TMP, value_buf);
+ erts_osenv_t *global_env;
+ int error;
+
+ global_env = erts_sys_rwlock_global_osenv();
+ error = erts_osenv_put_term(global_env, BIF_ARG_1, BIF_ARG_2);
+ erts_sys_rwunlock_global_osenv();
+
+ if (error < 0) {
+ BIF_ERROR(BIF_P, BADARG);
}
+
BIF_RET(am_true);
}
-BIF_RETTYPE os_unsetenv_1(BIF_ALIST_1)
+BIF_RETTYPE os_unset_env_var_1(BIF_ALIST_1)
{
- char *key_buf;
- char buf[STATIC_BUF_SIZE];
+ erts_osenv_t *global_env;
+ int error;
- key_buf = erts_convert_filename_to_native(BIF_ARG_1,buf,STATIC_BUF_SIZE,
- ERTS_ALC_T_TMP,0,0,NULL);
- if (!key_buf) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ global_env = erts_sys_rwlock_global_osenv();
+ error = erts_osenv_unset_term(global_env, BIF_ARG_1);
+ erts_sys_rwunlock_global_osenv();
- if (erts_sys_unsetenv(key_buf)) {
- if (key_buf != buf) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
- if (key_buf != buf) {
- erts_free(ERTS_ALC_T_TMP, key_buf);
+ if (error < 0) {
+ BIF_ERROR(BIF_P, BADARG);
}
+
BIF_RET(am_true);
}
+
+BIF_RETTYPE os_set_signal_2(BIF_ALIST_2) {
+ if (is_atom(BIF_ARG_1) && ((BIF_ARG_2 == am_ignore) ||
+ (BIF_ARG_2 == am_default) ||
+ (BIF_ARG_2 == am_handle))) {
+ if (!erts_set_signal(BIF_ARG_1, BIF_ARG_2))
+ goto error;
+
+ BIF_RET(am_ok);
+ }
+
+error:
+ BIF_ERROR(BIF_P, BADARG);
+}
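All four os env BIFs follow one locking discipline: take the global env rwlock in the weakest sufficient mode, copy the answer out (or mutate) while holding it, release it, and only then build the Erlang result or raise badarg. The same shape with POSIX rwlocks, as a sketch (the emulator uses its own lock wrappers, not pthreads):

    #include <pthread.h>

    static pthread_rwlock_t env_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int env_value;                   /* stands in for the env table */

    static int env_read(void) {
        pthread_rwlock_rdlock(&env_lock);   /* shared: many readers */
        int snapshot = env_value;           /* copy out under the lock */
        pthread_rwlock_unlock(&env_lock);
        return snapshot;                    /* build the result lock-free */
    }

    static void env_write(int v) {
        pthread_rwlock_wrlock(&env_lock);   /* exclusive: one writer */
        env_value = v;
        pthread_rwlock_unlock(&env_lock);
    }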
diff --git a/erts/emulator/beam/erl_bif_persistent.c b/erts/emulator/beam/erl_bif_persistent.c
new file mode 100644
index 0000000000..9dca768a18
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_persistent.c
@@ -0,0 +1,983 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: Implement persistent term storage.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "erl_map.h"
+#include "erl_binary.h"
+
+/*
+ * The limit for the number of persistent terms before
+ * a warning is issued.
+ */
+
+#define WARNING_LIMIT 20000
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
+/*
+ * Parameters for the hash table.
+ */
+#define INITIAL_SIZE 8
+#define LOAD_FACTOR ((Uint)50)
+#define MUST_GROW(t) (((Uint)100) * t->num_entries >= LOAD_FACTOR * t->allocated)
+#define MUST_SHRINK(t) (((Uint)200) * t->num_entries <= LOAD_FACTOR * t->allocated && \
+ t->allocated > INITIAL_SIZE)
+
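With LOAD_FACTOR at 50, the table grows once it is half full and shrinks once it drops to a quarter full (and is still larger than INITIAL_SIZE); the gap between the two thresholds keeps a workload hovering around one boundary from resizing on every operation. The arithmetic, checked directly:

    #include <stdio.h>

    int main(void) {
        unsigned allocated = 8;
        for (unsigned entries = 0; entries <= allocated; entries++) {
            int grow   = 100u * entries >= 50u * allocated; /* >= 50% */
            int shrink = 200u * entries <= 50u * allocated; /* <= 25% */
            printf("entries=%u grow=%d shrink=%d\n", entries, grow, shrink);
        }
        return 0;              /* grows at 4 entries, shrinks at <= 2 */
    }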
+typedef struct hash_table {
+ Uint allocated;
+ Uint num_entries;
+ Uint mask;
+ Uint first_to_delete;
+ Uint num_to_delete;
+ erts_atomic_t refc;
+ struct hash_table* delete_next;
+ ErtsThrPrgrLaterOp thr_prog_op;
+ Eterm term[1];
+} HashTable;
+
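The trailing Eterm term[1] is the pre-C99 flexible-array idiom: a table with N slots is allocated as the struct plus N-1 extra Eterm-sized words, so the header and the slots live in one block (see create_initial_table and copy_table below). The allocation pattern by itself:

    #include <stdlib.h>

    typedef unsigned long Term;        /* stand-in for Eterm */

    struct table {
        size_t allocated;
        Term term[1];                  /* really 'allocated' slots */
    };

    static struct table *table_new(size_t slots) {
        struct table *t = malloc(sizeof(struct table) +
                                 sizeof(Term) * (slots - 1));
        if (t != NULL) {
            t->allocated = slots;
            for (size_t i = 0; i < slots; i++)
                t->term[i] = 0;        /* NIL stand-in */
        }
        return t;
    }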
+typedef struct trap_data {
+ HashTable* table;
+ Uint idx;
+ Uint remaining;
+ Uint memory; /* Used by info/0 to count used memory */
+} TrapData;
+
+/*
+ * Declarations of local functions.
+ */
+
+static HashTable* create_initial_table(void);
+static Uint lookup(HashTable* hash_table, Eterm key);
+static HashTable* copy_table(HashTable* old_table, Uint new_size, int rehash);
+static HashTable* tmp_table_copy(HashTable* old_table);
+static int try_seize_update_permission(Process* c_p);
+static void release_update_permission(int release_updater);
+static void table_updater(void* table);
+static void table_deleter(void* hash_table);
+static void dec_table_refc(Process* c_p, HashTable* old_table);
+static void delete_table(Process* c_p, HashTable* table);
+static void mark_for_deletion(HashTable* hash_table, Uint entry_index);
+static ErtsLiteralArea* term_to_area(Eterm tuple);
+static void suspend_updater(Process* c_p);
+static Eterm do_get_all(Process* c_p, TrapData* trap_data, Eterm res);
+static Eterm do_info(Process* c_p, TrapData* trap_data);
+static void append_to_delete_queue(HashTable* table);
+static HashTable* next_to_delete(void);
+static Eterm alloc_trap_data(Process* c_p);
+static int cleanup_trap_data(Binary *bp);
+
+/*
+ * Traps
+ */
+
+static Export persistent_term_get_all_export;
+static BIF_RETTYPE persistent_term_get_all_trap(BIF_ALIST_2);
+static Export persistent_term_info_export;
+static BIF_RETTYPE persistent_term_info_trap(BIF_ALIST_1);
+
+/*
+ * Pointer to the current hash table.
+ */
+
+static erts_atomic_t the_hash_table;
+
+/*
+ * Queue of processes waiting to update the hash table.
+ */
+
+struct update_queue_item {
+ Process *p;
+ struct update_queue_item* next;
+};
+
+static erts_mtx_t update_table_permission_mtx;
+static struct update_queue_item* update_queue = NULL;
+static Process* updater_process = NULL;
+
+/* Protected by update_table_permission_mtx */
+static ErtsThrPrgrLaterOp thr_prog_op;
+static int issued_warning = 0;
+
+/*
+ * Queue of hash tables to be deleted.
+ */
+
+static erts_mtx_t delete_queue_mtx;
+static HashTable* delete_queue_head = NULL;
+static HashTable** delete_queue_tail = &delete_queue_head;
+
+/*
+ * The following variables are only used during crash dumping. They
+ * are initialized by erts_init_persistent_dumping().
+ */
+
+ErtsLiteralArea** erts_persistent_areas;
+Uint erts_num_persistent_areas;
+
+void erts_init_bif_persistent_term(void)
+{
+ HashTable* hash_table;
+
+ /*
+ * Initialize the mutex protecting updates.
+ */
+
+ erts_mtx_init(&update_table_permission_mtx,
+ "update_persistent_term_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Initialize delete queue.
+ */
+
+ erts_mtx_init(&delete_queue_mtx,
+ "persistent_term_delete_permission",
+ NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ /*
+ * Allocate a small initial hash table.
+ */
+
+ hash_table = create_initial_table();
+ erts_atomic_init_nob(&the_hash_table, (erts_aint_t)hash_table);
+
+ /*
+ * Initialize export entry for traps
+ */
+
+ erts_init_trap_export(&persistent_term_get_all_export,
+ am_persistent_term, am_get_all_trap, 2,
+ &persistent_term_get_all_trap);
+ erts_init_trap_export(&persistent_term_info_export,
+ am_persistent_term, am_info_trap, 1,
+ &persistent_term_info_trap);
+}
+
+BIF_RETTYPE persistent_term_put_2(BIF_ALIST_2)
+{
+ Eterm key;
+ Eterm term;
+ Eterm heap[3];
+ Eterm tuple;
+ HashTable* hash_table;
+ Uint term_size;
+ Uint lit_area_size;
+ ErlOffHeap code_off_heap;
+ ErtsLiteralArea* literal_area;
+ erts_shcopy_t info;
+ Eterm* ptr;
+ Uint entry_index;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_persistent_term_put_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+
+ key = BIF_ARG_1;
+ term = BIF_ARG_2;
+
+ entry_index = lookup(hash_table, key);
+
+ heap[0] = make_arityval(2);
+ heap[1] = key;
+ heap[2] = term;
+ tuple = make_tuple(heap);
+
+ if (is_nil(hash_table->term[entry_index])) {
+ Uint size = hash_table->allocated;
+ if (MUST_GROW(hash_table)) {
+ size *= 2;
+ }
+ hash_table = copy_table(hash_table, size, 0);
+ entry_index = lookup(hash_table, key);
+ hash_table->num_entries++;
+ } else {
+ Eterm tuple = hash_table->term[entry_index];
+ Eterm old_term;
+
+ ASSERT(is_tuple_arity(tuple, 2));
+ old_term = boxed_val(tuple)[2];
+ if (EQ(term, old_term)) {
+ /* Same value. No need to update anything. */
+ release_update_permission(0);
+ BIF_RET(am_ok);
+ } else {
+ /* Mark the old term for deletion. */
+ mark_for_deletion(hash_table, entry_index);
+ hash_table = copy_table(hash_table, hash_table->allocated, 0);
+ }
+ }
+
+ /*
+ * Preserve internal sharing in the term by using the
+ * sharing-preserving functions. However, literals must
+ * be copied in case the module holding them is unloaded.
+ */
+ INITIALIZE_SHCOPY(info);
+ info.copy_literals = 1;
+ term_size = copy_shared_calculate(tuple, &info);
+ ERTS_INIT_OFF_HEAP(&code_off_heap);
+ lit_area_size = ERTS_LITERAL_AREA_ALLOC_SIZE(term_size);
+ literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_area_size);
+ ptr = &literal_area->start[0];
+ literal_area->end = ptr + term_size;
+ tuple = copy_shared_perform(tuple, term_size, &info, &ptr, &code_off_heap);
+ ASSERT(tuple_val(tuple) == literal_area->start);
+ literal_area->off_heap = code_off_heap.first;
+ DESTROY_SHCOPY(info);
+ erts_set_literal_tag(&tuple, literal_area->start, term_size);
+ hash_table->term[entry_index] = tuple;
+
+ erts_schedule_thr_prgr_later_op(table_updater, hash_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+
+ /*
+ * Issue a warning once if the warning limit has been exceeded.
+ */
+
+ if (hash_table->num_entries > WARNING_LIMIT && issued_warning == 0) {
+ static char w[] =
+ "More than " XSTR(WARNING_LIMIT) " persistent terms "
+ "have been created.\n"
+ "It is recommended to avoid creating an excessive number of\n"
+ "persistent terms, as creation and deletion of persistent terms\n"
+ "will be slower as the number of persistent terms increases.\n";
+ issued_warning = 1;
+ erts_send_warning_to_logger_str(BIF_P->group_leader, w);
+ }
+
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+}
+
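put/2 never edits the live table: it builds a complete copy, stores the new term there, and schedules table_updater to swap the global pointer once thread progress guarantees no scheduler still dereferences the old one. Readers therefore need nothing stronger than a single pointer load. The publication half in miniature, assuming C11 atomics in place of the thread-progress machinery:

    #include <stdatomic.h>

    struct table;                      /* opaque in this sketch */
    static _Atomic(struct table *) the_table;

    static struct table *reader_snapshot(void) {
        /* readers see the old table or the new one, never a mix */
        return atomic_load_explicit(&the_table, memory_order_acquire);
    }

    static void writer_publish(struct table *fresh) {
        atomic_store_explicit(&the_table, fresh, memory_order_release);
        /* the old table must not be freed yet: defer until no reader
         * can still hold it (thread progress / grace period) */
    }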
+BIF_RETTYPE persistent_term_get_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ res = do_get_all(BIF_P, trap_data, res);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, magic_ref, res);
+ }
+}
+
+BIF_RETTYPE persistent_term_get_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ Uint entry_index;
+ Eterm term;
+
+ entry_index = lookup(hash_table, key);
+ term = hash_table->term[entry_index];
+ if (is_boxed(term)) {
+ ASSERT(is_tuple_arity(term, 2));
+ BIF_RET(tuple_val(term)[2]);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE persistent_term_erase_1(BIF_ALIST_1)
+{
+ Eterm key = BIF_ARG_1;
+ HashTable* old_table;
+ HashTable* new_table;
+ Uint entry_index;
+ Eterm old_term;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_persistent_term_erase_1],
+ BIF_P, BIF_ARG_1);
+ }
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ entry_index = lookup(old_table, key);
+ old_term = old_table->term[entry_index];
+ if (is_boxed(old_term)) {
+ Uint new_size;
+ HashTable* tmp_table;
+
+ /*
+ * Since we don't use any delete markers, we must rehash
+ * the table when deleting terms to ensure that all terms
+ * can still be reached if there are hash collisions.
+ * We can't rehash in place and it would not be safe to modify
+ * the old table yet, so we will first need a new
+ * temporary table copy of the same size as the old one.
+ */
+
+ ASSERT(is_tuple_arity(old_term, 2));
+ tmp_table = tmp_table_copy(old_table);
+
+ /*
+ * Delete the term from the temporary table. Then copy the
+ * temporary table to a new table, rehashing the entries
+ * while copying.
+ */
+
+ tmp_table->term[entry_index] = NIL;
+ tmp_table->num_entries--;
+ new_size = tmp_table->allocated;
+ if (MUST_SHRINK(tmp_table)) {
+ new_size /= 2;
+ }
+ new_table = copy_table(tmp_table, new_size, 1);
+ erts_free(ERTS_ALC_T_TMP, tmp_table);
+
+ mark_for_deletion(old_table, entry_index);
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ }
+
+ /*
+ * Key is not present. Nothing to do.
+ */
+
+ ASSERT(is_nil(old_term));
+ release_update_permission(0);
+ BIF_RET(am_false);
+}
+
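The rehash-on-erase rule exists because lookup (below) uses open addressing with linear probing and stops at the first NIL slot: simply clearing a slot would cut the probe chain and strand any colliding key stored past it. A compressed demonstration, with a deliberately degenerate hash that sends every key to slot 0:

    #include <assert.h>

    #define EMPTY 0                    /* keys must be nonzero here */

    static unsigned slot_of(const int *table, int key) {
        unsigned idx = 0;              /* pretend hash(key) == 0 */
        while (table[idx & 3u] != EMPTY && table[idx & 3u] != key)
            idx++;
        return idx & 3u;
    }

    int main(void) {
        int table[4] = {EMPTY, EMPTY, EMPTY, EMPTY};
        table[slot_of(table, 17)] = 17;     /* lands in slot 0 */
        table[slot_of(table, 42)] = 42;     /* collides, lands in slot 1 */

        table[0] = EMPTY;                   /* naive delete of 17... */
        assert(table[slot_of(table, 42)] != 42); /* ...and 42 is lost */
        return 0;
    }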
+BIF_RETTYPE erts_internal_erase_persistent_terms_0(BIF_ALIST_0)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ if (!try_seize_update_permission(BIF_P)) {
+ ERTS_BIF_YIELD0(bif_export[BIF_erts_internal_erase_persistent_terms_0],
+ BIF_P);
+ }
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ old_table->first_to_delete = 0;
+ old_table->num_to_delete = old_table->allocated;
+ new_table = create_initial_table();
+ erts_schedule_thr_prgr_later_op(table_updater, new_table, &thr_prog_op);
+ suspend_updater(BIF_P);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+}
+
+BIF_RETTYPE persistent_term_info_0(BIF_ALIST_0)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ TrapData* trap_data;
+ Eterm res = NIL;
+ Eterm magic_ref;
+ Binary* mbp;
+
+ magic_ref = alloc_trap_data(BIF_P);
+ mbp = erts_magic_ref2bin(magic_ref);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ trap_data->table = hash_table;
+ trap_data->idx = 0;
+ trap_data->remaining = hash_table->num_entries;
+ trap_data->memory = 0;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining == 0) {
+ BUMP_REDS(BIF_P, hash_table->num_entries);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BIF_RET(res);
+ } else {
+ /*
+ * Increment the ref counter to prevent an update operation (by put/2
+ * or erase/1) from deleting this hash table.
+ */
+ erts_atomic_inc_nob(&hash_table->refc);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_info_export, BIF_P, magic_ref, res);
+ }
+}
+
+Uint
+erts_persistent_term_count(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ return hash_table->num_entries;
+}
+
+void
+erts_init_persistent_dumping(void)
+{
+ HashTable* hash_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ ErtsLiteralArea** area_p;
+ Uint i;
+
+ /*
+ * Overwrite the array of Eterms in the current hash table
+ * with pointers to literal areas.
+ */
+
+ erts_persistent_areas = (ErtsLiteralArea **) hash_table->term;
+ erts_num_persistent_areas = hash_table->num_entries;
+ area_p = erts_persistent_areas;
+ for (i = 0; i < hash_table->allocated; i++) {
+ Eterm term = hash_table->term[i];
+
+ if (is_boxed(term)) {
+ *area_p++ = term_to_area(term);
+ }
+ }
+}
+
+/*
+ * Local functions.
+ */
+
+static HashTable*
+create_initial_table(void)
+{
+ HashTable* hash_table;
+ int i;
+
+ hash_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable)+sizeof(Eterm) *
+ (INITIAL_SIZE-1));
+ hash_table->allocated = INITIAL_SIZE;
+ hash_table->num_entries = 0;
+ hash_table->mask = INITIAL_SIZE-1;
+ hash_table->first_to_delete = 0;
+ hash_table->num_to_delete = 0;
+ erts_atomic_init_nob(&hash_table->refc, (erts_aint_t)1);
+ for (i = 0; i < INITIAL_SIZE; i++) {
+ hash_table->term[i] = NIL;
+ }
+ return hash_table;
+}
+
+static BIF_RETTYPE
+persistent_term_get_all_trap(BIF_ALIST_2)
+{
+ TrapData* trap_data;
+ Eterm res = BIF_ARG_2;
+ Uint bump_reds;
+ Binary* mbp;
+
+ ASSERT(is_list(BIF_ARG_2));
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_get_all(BIF_P, trap_data, res);
+ ASSERT(is_list(res));
+ if (trap_data->remaining > 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(&persistent_term_get_all_export, BIF_P, BIF_ARG_1, res);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ BIF_RET(res);
+ }
+}
+
+static Eterm
+do_get_all(Process* c_p, TrapData* trap_data, Eterm res)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+ Uint i;
+ Eterm* hp;
+ Uint heap_size;
+ struct copy_term {
+ Uint key_size;
+ Eterm* tuple_ptr;
+ } *copy_data;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ?
+ trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+
+ copy_data = (struct copy_term *) erts_alloc(ERTS_ALC_T_TMP,
+ remaining *
+ sizeof(struct copy_term));
+ i = 0;
+ heap_size = (2 + 3) * remaining;
+ while (remaining != 0) {
+ Eterm term = hash_table->term[idx];
+ if (is_tuple(term)) {
+ Uint key_size;
+ Eterm* tup_val;
+
+ ASSERT(is_tuple_arity(term, 2));
+ tup_val = tuple_val(term);
+ key_size = size_object(tup_val[1]);
+ copy_data[i].key_size = key_size;
+ copy_data[i].tuple_ptr = tup_val;
+ heap_size += key_size;
+ i++;
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+
+ hp = HAlloc(c_p, heap_size);
+ remaining = i;
+ for (i = 0; i < remaining; i++) {
+ Eterm* tuple_ptr;
+ Uint key_size;
+ Eterm key;
+ Eterm tup;
+
+ tuple_ptr = copy_data[i].tuple_ptr;
+ key_size = copy_data[i].key_size;
+ key = copy_struct(tuple_ptr[1], key_size, &hp, &c_p->off_heap);
+ tup = TUPLE2(hp, key, tuple_ptr[2]);
+ hp += 3;
+ res = CONS(hp, tup, res);
+ hp += 2;
+ }
+ erts_free(ERTS_ALC_T_TMP, copy_data);
+ return res;
+}
+
+static BIF_RETTYPE
+persistent_term_info_trap(BIF_ALIST_1)
+{
+ TrapData* trap_data;
+ Eterm res;
+ Uint bump_reds;
+ Binary* mbp;
+
+ mbp = erts_magic_ref2bin(BIF_ARG_1);
+ trap_data = ERTS_MAGIC_BIN_DATA(mbp);
+ bump_reds = trap_data->remaining;
+ res = do_info(BIF_P, trap_data);
+ if (trap_data->remaining > 0) {
+ ASSERT(res == am_ok);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&persistent_term_info_export, BIF_P, BIF_ARG_1);
+ } else {
+ /*
+ * Decrement ref count (and possibly delete the hash table
+ * and associated literal area).
+ */
+ dec_table_refc(BIF_P, trap_data->table);
+ trap_data->table = NULL; /* Prevent refc decrement */
+ BUMP_REDS(BIF_P, bump_reds);
+ ASSERT(is_map(res));
+ BIF_RET(res);
+ }
+}
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static Eterm
+do_info(Process* c_p, TrapData* trap_data)
+{
+ HashTable* hash_table;
+ Uint remaining;
+ Uint idx;
+ Uint max_iter;
+
+ hash_table = trap_data->table;
+ idx = trap_data->idx;
+#if defined(DEBUG) || defined(VALGRIND)
+ max_iter = 50;
+#else
+ max_iter = ERTS_BIF_REDS_LEFT(c_p);
+#endif
+ remaining = trap_data->remaining < max_iter ? trap_data->remaining : max_iter;
+ trap_data->remaining -= remaining;
+ while (remaining != 0) {
+ if (is_boxed(hash_table->term[idx])) {
+ ErtsLiteralArea* area;
+ area = term_to_area(hash_table->term[idx]);
+ trap_data->memory += sizeof(ErtsLiteralArea) +
+ sizeof(Eterm) * (area->end - area->start - 1);
+ remaining--;
+ }
+ idx++;
+ }
+ trap_data->idx = idx;
+ if (trap_data->remaining > 0) {
+ return am_ok; /* Dummy return value */
+ } else {
+ Eterm* hp;
+ Eterm count_term;
+ Eterm memory_term;
+ Eterm res;
+ Uint memory;
+ Uint hsz = MAP_SZ(2);
+
+ memory = sizeof(HashTable) + (trap_data->table->allocated-1) *
+ sizeof(Eterm) + trap_data->memory;
+ (void) erts_bld_uint(NULL, &hsz, hash_table->num_entries);
+ (void) erts_bld_uint(NULL, &hsz, memory);
+ hp = HAlloc(c_p, hsz);
+ count_term = erts_bld_uint(&hp, NULL, hash_table->num_entries);
+ memory_term = erts_bld_uint(&hp, NULL, memory);
+ res = MAP2(hp, am_count, count_term, am_memory, memory_term);
+ return res;
+ }
+}
+
+#undef DECL_AM
+
+static Eterm
+alloc_trap_data(Process* c_p)
+{
+ Binary* mbp = erts_create_magic_binary(sizeof(TrapData),
+ cleanup_trap_data);
+ Eterm* hp;
+
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
+}
+
+static int
+cleanup_trap_data(Binary *bp)
+{
+ TrapData* trap_data = ERTS_MAGIC_BIN_DATA(bp);
+
+ if (trap_data->table) {
+ /*
+ * The process has been killed and is now exiting.
+ * Decrement the reference counter for the table.
+ */
+ dec_table_refc(NULL, trap_data->table);
+ }
+ return 1;
+}
+
+static Uint
+lookup(HashTable* hash_table, Eterm key)
+{
+ Uint mask = hash_table->mask;
+ Eterm* table = hash_table->term;
+ Uint32 idx = make_internal_hash(key, 0);
+ Eterm term;
+
+ do {
+ idx++;
+ term = table[idx & mask];
+ } while (is_boxed(term) && !EQ(key, (tuple_val(term))[1]));
+ return idx & mask;
+}
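
lookup/2 is plain open addressing with linear probing: the table size is a power of two, so `idx & mask` wraps the probe sequence, and probing stops at either the matching key or the first free (NIL) slot. A standalone sketch, assuming the table is never full (the real table is resized before that can happen):

#include <stdio.h>

#define SIZE 8            /* must be a power of two */
#define EMPTY (-1)

/* Probe as in lookup() above: advance, then test, wrapping via the mask.
 * Returns the slot holding `key`, or the first empty slot if absent. */
static unsigned probe(const int table[SIZE], int key, unsigned hash)
{
    unsigned mask = SIZE - 1;
    unsigned idx = hash;
    int slot;
    do {
        idx++;
        slot = table[idx & mask];
    } while (slot != EMPTY && slot != key);
    return idx & mask;
}

int main(void)
{
    int table[SIZE] = {EMPTY, 42, EMPTY, EMPTY, 7, EMPTY, EMPTY, EMPTY};
    printf("42 found at slot %u\n", probe(table, 42, 0));
    printf("free slot for 9: %u\n", probe(table, 9, 0));
    return 0;
}
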
+
+static HashTable*
+tmp_table_copy(HashTable* old_table)
+{
+ Uint size = old_table->allocated;
+ HashTable* tmp_table;
+ Uint i;
+
+ tmp_table = (HashTable *) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (size-1));
+ *tmp_table = *old_table;
+ for (i = 0; i < size; i++) {
+ tmp_table->term[i] = old_table->term[i];
+ }
+ return tmp_table;
+}
+
+static HashTable*
+copy_table(HashTable* old_table, Uint new_size, int rehash)
+{
+ HashTable* new_table;
+ Uint old_size = old_table->allocated;
+ Uint i;
+
+ new_table = (HashTable *) erts_alloc(ERTS_ALC_T_PERSISTENT_TERM,
+ sizeof(HashTable) +
+ sizeof(Eterm) * (new_size-1));
+ if (old_table->allocated == new_size && !rehash) {
+ /*
+ * Same size and no key deleted. Make an exact copy of the table.
+ */
+ *new_table = *old_table;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = old_table->term[i];
+ }
+ } else {
+ /*
+ * The size of the table has changed or an element has been
+ * deleted. Must rehash, by inserting all old terms into the
+ * new (empty) table.
+ */
+ new_table->allocated = new_size;
+ new_table->num_entries = old_table->num_entries;
+ new_table->mask = new_size - 1;
+ for (i = 0; i < new_size; i++) {
+ new_table->term[i] = NIL;
+ }
+ for (i = 0; i < old_size; i++) {
+ if (is_tuple(old_table->term[i])) {
+ Eterm key = tuple_val(old_table->term[i])[1];
+ Uint entry_index = lookup(new_table, key);
+ ASSERT(is_nil(new_table->term[entry_index]));
+ new_table->term[entry_index] = old_table->term[i];
+ }
+ }
+ }
+ new_table->first_to_delete = 0;
+ new_table->num_to_delete = 0;
+ erts_atomic_init_nob(&new_table->refc, (erts_aint_t)1);
+ return new_table;
+}
+
+static void
+mark_for_deletion(HashTable* hash_table, Uint entry_index)
+{
+ hash_table->first_to_delete = entry_index;
+ hash_table->num_to_delete = 1;
+}
+
+static ErtsLiteralArea*
+term_to_area(Eterm tuple)
+{
+ ASSERT(is_tuple_arity(tuple, 2));
+ return (ErtsLiteralArea *) (((char *) tuple_val(tuple)) -
+ offsetof(ErtsLiteralArea, start));
+}
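
term_to_area/1 is the classic container_of trick: since the tuple is stored at the start of its literal area, offsetof() steps back from the `start` member to the enclosing ErtsLiteralArea. A self-contained sketch with invented types:

#include <stddef.h>
#include <assert.h>

struct area {
    int header;
    int start[1];    /* one-element-array member, as in ErtsLiteralArea */
};

/* Given a pointer to the `start` member, recover the enclosing struct. */
static struct area *member_to_area(int *start_ptr)
{
    return (struct area *) ((char *) start_ptr - offsetof(struct area, start));
}

int main(void)
{
    struct area a = { 42, { 7 } };
    assert(member_to_area(a.start) == &a);
    return 0;
}
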
+
+static void
+table_updater(void* data)
+{
+ HashTable* old_table;
+ HashTable* new_table;
+
+ old_table = (HashTable *) erts_atomic_read_nob(&the_hash_table);
+ new_table = (HashTable *) data;
+ ASSERT(new_table->num_to_delete == 0);
+ erts_atomic_set_nob(&the_hash_table, (erts_aint_t)new_table);
+ append_to_delete_queue(old_table);
+ erts_schedule_thr_prgr_later_op(table_deleter,
+ old_table,
+ &old_table->thr_prog_op);
+ release_update_permission(1);
+}
+
+static void
+table_deleter(void* data)
+{
+ HashTable* old_table = (HashTable *) data;
+
+ dec_table_refc(NULL, old_table);
+}
+
+static void
+dec_table_refc(Process* c_p, HashTable* old_table)
+{
+ erts_aint_t refc = erts_atomic_dec_read_nob(&old_table->refc);
+
+ if (refc == 0) {
+ HashTable* to_delete;
+
+ while ((to_delete = next_to_delete()) != NULL) {
+ delete_table(c_p, to_delete);
+ }
+ }
+}
+
+static void
+delete_table(Process* c_p, HashTable* table)
+{
+ Uint idx = table->first_to_delete;
+ Uint n = table->num_to_delete;
+
+ /*
+ * There are no longer any references to this hash table.
+ *
+ * Any literals marked for deletion can now be queued for
+ * release, and the table itself can be deallocated.
+ */
+
+#ifdef DEBUG
+ if (n == 1) {
+ ASSERT(is_tuple_arity(table->term[idx], 2));
+ }
+#endif
+
+ while (n > 0) {
+ Eterm term = table->term[idx];
+
+ if (is_tuple_arity(term, 2)) {
+ if (is_immed(tuple_val(term)[2])) {
+ erts_release_literal_area(term_to_area(term));
+ } else {
+ erts_queue_release_literals(c_p, term_to_area(term));
+ }
+ }
+ idx++, n--;
+ }
+ erts_free(ERTS_ALC_T_PERSISTENT_TERM, table);
+}
+
+/*
+ * Caller *must* yield if this function returns 0.
+ */
+
+static int
+try_seize_update_permission(Process* c_p)
+{
+ int success;
+
+ ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
+ ASSERT(c_p != NULL);
+
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != c_p);
+ success = (updater_process == NULL);
+ if (success) {
+ updater_process = c_p;
+ } else {
+ struct update_queue_item* qitem;
+ qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_proc_inc_refc(c_p);
+ qitem->next = update_queue;
+ update_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+ return success;
+}
+
+static void
+release_update_permission(int release_updater)
+{
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process != NULL);
+
+ if (release_updater) {
+ erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(updater_process)) {
+ erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
+ }
+ updater_process = NULL;
+
+ while (update_queue != NULL) { /* Unleash the entire herd */
+ struct update_queue_item* qitem = update_queue;
+ erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ update_queue = qitem->next;
+ erts_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
+ }
+ erts_mtx_unlock(&update_table_permission_mtx);
+}
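
try_seize_update_permission/1 and release_update_permission/0 implement a single-updater gate: one process at a time may mutate the table, and contenders are queued and suspended rather than spinning. A rough pthread analogue (a condition variable stands in for process suspension; this is a sketch of the idea, not how the emulator does it):

#include <pthread.h>

static pthread_mutex_t perm_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t perm_cv = PTHREAD_COND_INITIALIZER;
static int have_updater = 0;

static void seize_update_permission(void)
{
    pthread_mutex_lock(&perm_mtx);
    while (have_updater)              /* emulator: enqueue + suspend instead */
        pthread_cond_wait(&perm_cv, &perm_mtx);
    have_updater = 1;
    pthread_mutex_unlock(&perm_mtx);
}

static void release_update_permission(void)
{
    pthread_mutex_lock(&perm_mtx);
    have_updater = 0;
    pthread_cond_broadcast(&perm_cv); /* emulator: resume queued processes */
    pthread_mutex_unlock(&perm_mtx);
}

int main(void)
{
    seize_update_permission();
    /* ... swap in the new table ... */
    release_update_permission();
    return 0;
}
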
+
+static void
+suspend_updater(Process* c_p)
+{
+#ifdef DEBUG
+ ASSERT(c_p != NULL);
+ erts_mtx_lock(&update_table_permission_mtx);
+ ASSERT(updater_process == c_p);
+ erts_mtx_unlock(&update_table_permission_mtx);
+#endif
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+}
+
+static void
+append_to_delete_queue(HashTable* table)
+{
+ erts_mtx_lock(&delete_queue_mtx);
+ table->delete_next = NULL;
+ *delete_queue_tail = table;
+ delete_queue_tail = &table->delete_next;
+ erts_mtx_unlock(&delete_queue_mtx);
+}
+
+static HashTable*
+next_to_delete(void)
+{
+ HashTable* table;
+
+ erts_mtx_lock(&delete_queue_mtx);
+ table = delete_queue_head;
+ if (table) {
+ if (erts_atomic_read_nob(&table->refc)) {
+ /*
+ * This hash table is still referenced. Hash tables
+ * must be deleted in order, so we return a NULL
+ * pointer.
+ */
+ table = NULL;
+ } else {
+ /*
+ * Remove the first hash table from the queue.
+ */
+ delete_queue_head = table->delete_next;
+ if (delete_queue_head == NULL) {
+ delete_queue_tail = &delete_queue_head;
+ }
+ }
+ }
+ erts_mtx_unlock(&delete_queue_mtx);
+ return table;
+}
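
append_to_delete_queue/1 keeps a pointer to the last `next` field (`delete_queue_tail`) rather than to the last node, which makes the empty and non-empty append cases identical and O(1). A standalone sketch of that tail-pointer queue:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int val; };

static struct node *head = NULL;
static struct node **tail = &head;

/* O(1) append: works the same whether the queue is empty or not. */
static void append(struct node *n)
{
    n->next = NULL;
    *tail = n;
    tail = &n->next;
}

static struct node *pop(void)
{
    struct node *n = head;
    if (n != NULL) {
        head = n->next;
        if (head == NULL)
            tail = &head;   /* queue became empty: reset the tail pointer */
    }
    return n;
}

int main(void)
{
    struct node a = {NULL, 1}, b = {NULL, 2};
    struct node *n;
    append(&a);
    append(&b);
    while ((n = pop()) != NULL)
        printf("%d\n", n->val);
    return 0;
}
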
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 90e78a9b0b..7fe4e02782 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,9 +43,10 @@
#include "erl_bits.h"
#include "erl_bif_unique.h"
#include "dtrace-wrapper.h"
+#include "erl_proc_sig_queue.h"
static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump);
-static byte* convert_environment(Process* p, Eterm env);
+static int merge_global_environment(erts_osenv_t *env, Eterm key_value_pairs);
static char **convert_args(Eterm);
static void free_args(char **);
@@ -53,10 +54,13 @@ char *erts_default_arg0 = "default";
BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
{
+ BIF_RETTYPE ret;
Port *port;
Eterm res;
char *str;
int err_type, err_num;
+ ErtsLinkData *ldp;
+ ErtsLink *lnk;
port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num);
if (!port) {
@@ -71,13 +75,23 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
} else if (err_type == -2) {
str = erl_errno_id(err_num);
- res = erts_atom_put((byte *) str, strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
+ res = erts_atom_put((byte *) str, sys_strlen(str), ERTS_ATOM_ENC_LATIN1, 1);
} else {
res = am_einval;
}
BIF_RET(res);
}
+ ldp = erts_link_create(ERTS_LNK_TYPE_PORT, BIF_P->common.id, port->common.id);
+ ASSERT(ldp->a.other.item == port->common.id);
+ ASSERT(ldp->b.other.item == BIF_P->common.id);
+ /*
+ * This link should not already be present, but potentially
+ * can be, due to id wrapping...
+ */
+ lnk = erts_link_tree_lookup_insert(&ERTS_P_LINKS(BIF_P), &ldp->a);
+ erts_link_tree_insert(&ERTS_P_LINKS(port), &ldp->b);
+
if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {
/* Copied from erl_port_task.c */
@@ -86,39 +100,30 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
erts_make_ref_in_array(port->async_open_port->ref);
port->async_open_port->to = BIF_P->common.id;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
- if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
- /* need to exit caller instead */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK);
- KILL_CATCHES(BIF_P);
- BIF_P->freason = EXC_EXIT;
- erts_port_release(port);
- BIF_RET(am_badarg);
- }
-
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(BIF_P);
- BIF_P->msg.save = BIF_P->msg.last;
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ /*
+ * We unconditionally *must* do a receive on a message
+ * containing the reference after this...
+ */
+ ERTS_RECV_MARK_SAVE(BIF_P);
+ ERTS_RECV_MARK_SET(BIF_P);
res = erts_proc_store_ref(BIF_P, port->async_open_port->ref);
} else {
res = port->common.id;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
}
- erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
- erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port->common.id);
-
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS))
- trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P,
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P,
am_link, port->common.id);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ ERTS_BIF_PREP_RET(ret, res);
erts_port_release(port);
- BIF_RET(res);
+ if (lnk)
+ erts_link_release(lnk);
+
+ return ret;
}
static ERTS_INLINE Port *
@@ -195,7 +200,6 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
#endif
switch (erts_port_output(BIF_P, flags, prt, prt->common.id, BIF_ARG_2, &ref)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
ERTS_BIF_PREP_RET(res, am_badarg);
@@ -214,7 +218,7 @@ BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
/* Fall through... */
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
ERTS_BIF_PREP_RET(res, ref);
break;
case ERTS_PORT_OP_DONE:
@@ -254,13 +258,12 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
op = (unsigned int) uint_op;
switch (erts_port_call(BIF_P, prt, op, BIF_ARG_3, &retval)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_DROPPED:
case ERTS_PORT_OP_BADARG:
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -271,14 +274,9 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
break;
}
- state = erts_smp_atomic32_read_acqb(&BIF_P->state);
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
- erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ state = erts_atomic32_read_acqb(&BIF_P->state);
+ if (state & ERTS_PSFLG_EXITING)
ERTS_BIF_EXITED(BIF_P);
- }
BIF_RET(retval);
}
@@ -304,13 +302,12 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
op = (unsigned int) uint_op;
switch (erts_port_control(BIF_P, prt, op, BIF_ARG_3, &retval)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
retval = am_badarg;
break;
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
break;
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -321,14 +318,9 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
break;
}
- state = erts_smp_atomic32_read_acqb(&BIF_P->state);
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
- erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ state = erts_atomic32_read_acqb(&BIF_P->state);
+ if (state & ERTS_PSFLG_EXITING)
ERTS_BIF_EXITED(BIF_P);
- }
BIF_RET(retval);
}
@@ -351,12 +343,11 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
BIF_RET(am_badarg);
switch (erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, am_normal, &ref)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
case ERTS_PORT_OP_DONE:
BIF_RET(am_true);
@@ -384,12 +375,11 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
#endif
switch (erts_port_connect(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, &ref)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_badarg);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(ref));
+ ASSERT(is_internal_ordinary_ref(ref));
BIF_RET(ref);
break;
case ERTS_PORT_OP_DONE:
@@ -422,13 +412,12 @@ BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
}
switch (erts_port_info(BIF_P, prt, THE_NON_VALUE, &retval)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
BIF_RET(am_badarg);
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -461,13 +450,12 @@ BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
}
switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) {
- case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
BIF_RET(am_badarg);
case ERTS_PORT_OP_DROPPED:
BIF_RET(am_undefined);
case ERTS_PORT_OP_SCHEDULED:
- ASSERT(is_internal_ref(retval));
+ ASSERT(is_internal_ordinary_ref(retval));
BIF_RET(retval);
case ERTS_PORT_OP_DONE:
ASSERT(is_not_internal_ref(retval));
@@ -511,39 +499,35 @@ cleanup_old_port_data(erts_aint_t data)
ASSERT(is_immed((Eterm) data));
}
else {
-#ifdef ERTS_SMP
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
size_t size;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm);
erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap,
(void *) pdhp,
&pdhp->later_op,
size);
-#else
- free_port_data_heap((void *) data);
-#endif
}
}
void
erts_init_port_data(Port *prt)
{
- erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined);
+ erts_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined);
}
void
erts_cleanup_port_data(Port *prt)
{
ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
- cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data,
+ cleanup_old_port_data(erts_atomic_xchg_nob(&prt->data,
(erts_aint_t) NULL));
}
Uint
erts_port_data_size(Port *prt)
{
- erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+ erts_aint_t data = erts_atomic_read_ddrb(&prt->data);
if ((data & 0x3) != 0) {
ASSERT(is_immed((Eterm) (UWord) data));
@@ -558,7 +542,7 @@ erts_port_data_size(Port *prt)
ErlOffHeap *
erts_port_data_offheap(Port *prt)
{
- erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data);
+ erts_aint_t data = erts_atomic_read_ddrb(&prt->data);
if ((data & 0x3) != 0) {
ASSERT(is_immed((Eterm) (UWord) data));
@@ -603,11 +587,11 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
ASSERT((data & 0x3) == 0);
}
- data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ data = erts_atomic_xchg_wb(&prt->data, data);
if (data == (erts_aint_t)NULL) {
/* Port terminated by racing thread */
- data = erts_smp_atomic_xchg_wb(&prt->data, data);
+ data = erts_atomic_xchg_wb(&prt->data, data);
ASSERT(data != (erts_aint_t)NULL);
cleanup_old_port_data(data);
BIF_ERROR(BIF_P, BADARG);
@@ -630,7 +614,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
if (!prt)
BIF_ERROR(BIF_P, BADARG);
- data = erts_smp_atomic_read_ddrb(&prt->data);
+ data = erts_atomic_read_ddrb(&prt->data);
if (data == (erts_aint_t)NULL)
BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */
@@ -647,6 +631,27 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
BIF_RET(res);
}
+Eterm erts_port_data_read(Port* prt)
+{
+ Eterm res;
+ erts_aint_t data;
+
+ data = erts_atomic_read_ddrb(&prt->data);
+ if (data == (erts_aint_t)NULL)
+ return am_undefined; /* Port terminated by racing thread */
+
+ if ((data & 0x3) != 0) {
+ res = (Eterm) (UWord) data;
+ ASSERT(is_immed(res));
+ }
+ else {
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ res = pdhp->data;
+ }
+ return res;
+}
+
+
/*
* Open a port. Most of the work is not done here but rather in
* the file io.c.
@@ -659,6 +664,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
static Port *
open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
{
+ int merged_environment = 0;
Sint i;
Eterm option;
Uint arity;
@@ -680,12 +686,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
opts.read_write = 0;
opts.hide_window = 0;
opts.wd = NULL;
- opts.envir = NULL;
opts.exit_status = 0;
opts.overlapped_io = 0;
opts.spawn_type = ERTS_SPAWN_ANY;
opts.argv = NULL;
opts.parallelism = erts_port_parallelism;
+ erts_osenv_init(&opts.envir);
+
linebuf = 0;
*err_nump = 0;
@@ -726,11 +733,16 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
goto badarg;
}
} else if (option == am_env) {
- byte* bytes;
- if ((bytes = convert_environment(p, *tp)) == NULL) {
- goto badarg;
+ if (merged_environment) {
+ /* Ignore previous env option */
+ erts_osenv_clear(&opts.envir);
+ }
+
+ merged_environment = 1;
+
+ if (merge_global_environment(&opts.envir, *tp)) {
+ goto badarg;
}
- opts.envir = (char *) bytes;
} else if (option == am_args) {
char **av;
char **oav = opts.argv;
@@ -815,6 +827,12 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
if((linebuf && opts.packet_bytes) ||
(opts.redir_stderr && !opts.use_stdio)) {
goto badarg;
+}
+
+ /* If we lacked an env option, fill in the global environment without
+ * changes. */
+ if (!merged_environment) {
+ merge_global_environment(&opts.envir, NIL);
}
/*
@@ -925,7 +943,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump);
#ifdef USE_VM_PROBES
@@ -942,7 +960,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
if (port && IS_TRACED_FL(port, F_TRACE_PORTS))
trace_port(port, am_getting_linked, p->common.id);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
@@ -964,6 +982,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
erts_atomic32_read_bor_relb(&port->state, sflgs);
do_return:
+ erts_osenv_clear(&opts.envir);
if (name_buf)
erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
if (opts.argv) {
@@ -983,6 +1002,45 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
goto do_return;
}
+/* Merges the global environment and the given {Key, Value} list into env,
+ * unsetting all keys whose value is either 'false' or NIL. The behavior on
+ * NIL is undocumented and perhaps surprising, but the previous implementation
+ * worked in this manner. */
+static int merge_global_environment(erts_osenv_t *env, Eterm key_value_pairs) {
+ const erts_osenv_t *global_env = erts_sys_rlock_global_osenv();
+ erts_osenv_merge(env, global_env, 0);
+ erts_sys_runlock_global_osenv();
+
+ while (is_list(key_value_pairs)) {
+ Eterm *cell, *tuple;
+
+ cell = list_val(key_value_pairs);
+
+ if(!is_tuple_arity(CAR(cell), 2)) {
+ return -1;
+ }
+
+ tuple = tuple_val(CAR(cell));
+ key_value_pairs = CDR(cell);
+
+ if(is_nil(tuple[2]) || tuple[2] == am_false) {
+ if(erts_osenv_unset_term(env, tuple[1]) < 0) {
+ return -1;
+ }
+ } else {
+ if(erts_osenv_put_term(env, tuple[1], tuple[2]) < 0) {
+ return -1;
+ }
+ }
+ }
+
+ if(!is_nil(key_value_pairs)) {
+ return -1;
+ }
+
+ return 0;
+}
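
As the comment above notes, the override list both sets and unsets keys: a value of 'false' (or NIL) removes the key from the merged environment. A standalone sketch of those semantics over a toy array-backed environment (invented names; the real code operates on erts_osenv_t):

#include <stdio.h>
#include <string.h>

#define MAX_ENV 16

struct env { const char *key[MAX_ENV]; const char *val[MAX_ENV]; int n; };

/* Set key=val, or unset the key when val is NULL. */
static void put(struct env *e, const char *key, const char *val)
{
    int i;
    for (i = 0; i < e->n; i++) {
        if (strcmp(e->key[i], key) == 0) {
            if (val == NULL) {              /* unset: swap in last entry */
                e->key[i] = e->key[--e->n];
                e->val[i] = e->val[e->n];
            } else {
                e->val[i] = val;            /* overwrite */
            }
            return;
        }
    }
    if (val != NULL && e->n < MAX_ENV) {
        e->key[e->n] = key;
        e->val[e->n] = val;
        e->n++;
    }
}

int main(void)
{
    struct env e = {{0}, {0}, 0};
    int i;
    put(&e, "HOME", "/root");   /* inherited from the "global" environment */
    put(&e, "LANG", "C");
    put(&e, "LANG", NULL);      /* like an override entry {"LANG", false} */
    put(&e, "PORT", "8080");    /* like an override entry {"PORT", "8080"} */
    for (i = 0; i < e.n; i++)
        printf("%s=%s\n", e.key[i], e.val[i]);
    return 0;
}
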
+
/* Arguments can be given in Unicode or as raw binaries; filename conversion is used to convert them. */
static char **convert_args(Eterm l)
{
@@ -1029,75 +1087,6 @@ static void free_args(char **av)
}
erts_free(ERTS_ALC_T_TMP, av);
}
-
-
-static byte* convert_environment(Process* p, Eterm env)
-{
- Eterm all;
- Eterm* temp_heap;
- Eterm* hp;
- Uint heap_size;
- Sint n;
- Sint size;
- byte* bytes;
- int encoding = erts_get_native_filename_encoding();
-
- if ((n = erts_list_length(env)) < 0) {
- return NULL;
- }
- heap_size = 2*(5*n+1);
- temp_heap = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, heap_size*sizeof(Eterm));
- bytes = NULL; /* Indicating error */
-
- /*
- * All errors below are handled by jumping to 'done', to ensure that the memory
- * gets deallocated. Do NOT return directly from this function.
- */
-
- all = CONS(hp, make_small(0), NIL);
- hp += 2;
-
- while(is_list(env)) {
- Eterm tmp;
- Eterm* tp;
-
- tmp = CAR(list_val(env));
- if (is_not_tuple_arity(tmp, 2)) {
- goto done;
- }
- tp = tuple_val(tmp);
- tmp = CONS(hp, make_small(0), NIL);
- hp += 2;
- if (tp[2] != am_false) {
- tmp = CONS(hp, tp[2], tmp);
- hp += 2;
- }
- tmp = CONS(hp, make_small('='), tmp);
- hp += 2;
- tmp = CONS(hp, tp[1], tmp);
- hp += 2;
- all = CONS(hp, tmp, all);
- hp += 2;
- env = CDR(list_val(env));
- }
- if (is_not_nil(env)) {
- goto done;
- }
-
- if ((size = erts_native_filename_need(all,encoding)) < 0) {
- goto done;
- }
-
- /*
- * Put the result in a binary (no risk for a memory leak that way).
- */
- (void) erts_new_heap_binary(p, NULL, size, &bytes);
- erts_native_filename_put(all,encoding,bytes);
-
- done:
- erts_free(ERTS_ALC_T_TMP, temp_heap);
- return bytes;
-}
/* ------------ decode_packet() and friends: */
@@ -1148,7 +1137,7 @@ http_bld_string(struct packet_callback_args* pca, Uint **hpp, Uint *szp,
ErlHeapBin* bin = (ErlHeapBin*) *hpp;
bin->thing_word = header_heap_bin(len);
bin->size = len;
- memcpy(bin->data, str, len);
+ sys_memcpy(bin->data, str, len);
*hpp += size;
}
}
@@ -1326,9 +1315,9 @@ int ssl_tls_erl(void* arg, unsigned type, unsigned major, unsigned minor,
Eterm bin = new_binary(pca->p, NULL, plen+len);
byte* bin_ptr = binary_bytes(bin);
- memcpy(bin_ptr+plen, buf, len);
+ sys_memcpy(bin_ptr+plen, buf, len);
if (plen) {
- memcpy(bin_ptr, prefix, plen);
+ sys_memcpy(bin_ptr, prefix, plen);
}
/* {ssl_tls,NIL,ContentType,{Major,Minor},Bin} */
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index ff7746ce1d..bbc64eb9aa 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -64,12 +64,43 @@ static void erts_erts_pcre_stack_free(void *ptr) {
erts_free(ERTS_ALC_T_RE_STACK,ptr);
}
+#define ERTS_PCRE_STACK_MARGIN (10*1024)
+
+# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
+
+static int
+stack_guard_downwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_below_limit(&c, limit + ERTS_PCRE_STACK_MARGIN);
+}
+
+static int
+stack_guard_upwards(void)
+{
+ char *limit = ERTS_STACK_LIMIT;
+ char c;
+
+ ASSERT(limit);
+
+ return erts_check_above_limit(&c, limit - ERTS_PCRE_STACK_MARGIN);
+}
+
void erts_init_bif_re(void)
{
+ char c;
erts_pcre_malloc = &erts_erts_pcre_malloc;
erts_pcre_free = &erts_erts_pcre_free;
erts_pcre_stack_malloc = &erts_erts_pcre_stack_malloc;
erts_pcre_stack_free = &erts_erts_pcre_stack_free;
+ if ((char *) erts_ptr_id(&c) > ERTS_STACK_LIMIT)
+ erts_pcre_stack_guard = stack_guard_downwards;
+ else
+ erts_pcre_stack_guard = stack_guard_upwards;
default_table = NULL; /* ISO8859-1 default, forced into pcre */
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
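
erts_init_bif_re picks a PCRE stack guard by probing the stack growth direction at runtime: if the address of a local variable is above the thread's stack limit, the stack grows downwards. A related standalone probe comparing caller and callee frames (formally such pointer comparisons are unspecified C, which is exactly why the direction is detected empirically rather than assumed):

#include <stdio.h>

static int grows_downwards(char *caller_local)
{
    char callee_local;
    /* Deeper frame at a lower address implies a downward-growing stack. */
    return &callee_local < caller_local;
}

int main(void)
{
    char c;
    printf("stack grows %s\n", grows_downwards(&c) ? "down" : "up");
    return 0;
}
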
@@ -477,6 +508,17 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
* Compile BIFs
*/
+BIF_RETTYPE
+re_version_0(BIF_ALIST_0)
+{
+ Eterm ret;
+ size_t version_size = 0;
+ byte *version = (byte *) erts_pcre_version();
+ version_size = sys_strlen((const char *) version);
+ ret = new_binary(BIF_P, version, version_size);
+ BIF_RET(ret);
+}
+
static BIF_RETTYPE
re_compile(Process* p, Eterm arg1, Eterm arg2)
{
@@ -600,10 +642,11 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
/*
@@ -955,7 +998,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
has_dupnames = ((options & PCRE_DUPNAMES) != 0);
for(i=0;i<top;++i) {
- if (last == NULL || !has_dupnames || strcmp((char *) last+2,(char *) nametable+2)) {
+ if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2,(char *) nametable+2)) {
ASSERT(ri->num_spec >= 0);
++(ri->num_spec);
if(ri->num_spec > sallocated) {
@@ -1005,7 +1048,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
(tmpbsiz = ap->len + 1));
}
}
- memcpy(tmpb,ap->name,ap->len);
+ sys_memcpy(tmpb,ap->name,ap->len);
tmpb[ap->len] = '\0';
} else {
ErlDrvSizeT slen;
@@ -1167,7 +1210,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count);
ovsize = 3*(capture_count+1);
restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
- memcpy(restart.code, result, code_size);
+ sys_memcpy(restart.code, result, code_size);
erts_pcre_free(result);
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/*unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;*/
@@ -1209,7 +1252,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
BIF_ERROR(p, BADARG);
}
restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
- memcpy(restart.code, code_tmp, code_size);
+ sys_memcpy(restart.code, code_tmp, code_size);
erts_free_aligned_binary_bytes(temp_alloc);
}
@@ -1319,17 +1362,17 @@ handle_iolist:
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
cleanup_restart_context_bin);
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
- Eterm magic_bin;
+ Eterm magic_ref;
Eterm *hp;
- memcpy(restartp,&restart,sizeof(RestartContext));
+ sys_memcpy(restartp,&restart,sizeof(RestartContext));
BUMP_ALL_REDS(p);
- hp = HAlloc(p, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
p,
arg1,
arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
- magic_bin);
+ magic_ref);
}
res = build_exec_return(p, rc, &restart, arg1);
@@ -1366,9 +1409,7 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
Uint loop_limit_tmp;
Eterm res;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_3));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_3);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
@@ -1476,7 +1517,7 @@ re_inspect_2(BIF_ALIST_2)
last = NULL;
name = nametable;
for(i=0;i<top;++i) {
- if (last == NULL || !has_dupnames || strcmp((char *) last+2,
+ if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2,
(char *) name+2)) {
++num_names;
}
@@ -1490,9 +1531,9 @@ re_inspect_2(BIF_ALIST_2)
name = nametable;
j = 0;
for(i=0;i<top;++i) {
- if (last == NULL || !has_dupnames || strcmp((char *) last+2,
+ if (last == NULL || !has_dupnames || sys_strcmp((char *) last+2,
(char *) name+2)) {
- tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, strlen((char *) name+2));
+ tmp_vec[j++] = new_binary(BIF_P, (byte *) name+2, sys_strlen((char *) name+2));
}
last = name;
name += entrysize;
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 66e5146da0..711e62c795 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,6 +40,7 @@
#include "erl_binary.h"
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
+#include "erl_proc_sig_queue.h"
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -60,10 +61,8 @@ static struct { /* Protected by code write permission */
int local;
BpFunctions f; /* Local functions */
BpFunctions e; /* Export entries */
-#ifdef ERTS_SMP
Process* stager;
ErtsThrPrgrLaterOp lop;
-#endif
} finish_bp;
static Eterm
@@ -71,9 +70,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
static int
erts_set_tracing_event_pattern(Eterm event, Binary*, int on);
-#ifdef ERTS_SMP
static void smp_bp_finisher(void* arg);
-#endif
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
@@ -125,7 +122,6 @@ erts_internal_trace_pattern_3(BIF_ALIST_3)
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
- DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
int matches = -1;
int specified = 0;
@@ -308,30 +304,30 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
matches = 0;
} else if (is_tuple(MFA)) {
+ ErtsCodeMFA mfa;
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
goto error;
}
- mfa[0] = tp[1];
- mfa[1] = tp[2];
- mfa[2] = tp[3];
- if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
- (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) ||
+ (!is_small(tp[3]) && tp[3] != am_Underscore)) {
goto error;
}
- for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ for (i = 0; i < 3 && tp[i+1] != am_Underscore; i++, specified++) {
/* Empty loop body */
}
for (i = specified; i < 3; i++) {
- if (mfa[i] != am_Underscore) {
+ if (tp[i+1] != am_Underscore) {
goto error;
}
}
- if (is_small(mfa[2])) {
- mfa[2] = signed_val(mfa[2]);
+ mfa.module = tp[1];
+ mfa.function = tp[2];
+ if (specified == 3) {
+ mfa.arity = signed_val(tp[3]);
}
- matches = erts_set_trace_pattern(p, mfa, specified,
+ matches = erts_set_trace_pattern(p, &mfa, specified,
match_prog_set, match_prog_set,
on, flags, meta_tracer, 0);
} else if (is_atom(MFA)) {
@@ -343,11 +339,9 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
error:
MatchSetUnref(match_prog_set);
- UnUseTmpHeap(3,p);
ERTS_TRACER_CLEAR(&meta_tracer);
-#ifdef ERTS_SMP
if (finish_bp.current >= 0) {
ASSERT(matches >= 0);
ASSERT(finish_bp.stager == NULL);
@@ -357,7 +351,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(p, make_small(matches));
}
-#endif
erts_release_code_write_permission();
@@ -369,7 +362,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
}
-#ifdef ERTS_SMP
static void smp_bp_finisher(void* null)
{
if (erts_finish_breakpointing()) { /* Not done */
@@ -382,15 +374,14 @@ static void smp_bp_finisher(void* null)
finish_bp.stager = NULL;
#endif
erts_release_code_write_permission();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (!ERTS_PROC_IS_EXITING(p)) {
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
erts_proc_dec_refc(p);
}
}
-#endif /* ERTS_SMP */
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
@@ -399,8 +390,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
ErtsTracer *meta_tracer)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -415,8 +406,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
int erts_is_default_trace_enabled(void)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
- erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -512,7 +503,7 @@ start_trace(Process *c_p, ErtsTracer tracer,
&& !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (erts_is_tracer_enabled(tracer, common)) {
+ if (erts_is_tracer_enabled(ERTS_TRACER(port), common)) {
/* The tracer is still in use */
return 1;
}
@@ -545,9 +536,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
int matches = 0;
Uint mask = 0;
int cpu_ts = 0;
-#ifdef ERTS_SMP
int system_blocked = 0;
-#endif
if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) {
BIF_ERROR(p, BADARG);
@@ -622,13 +611,13 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
goto error;
if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) {
- erts_smp_proc_unlock(tracee_p,
+ erts_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
goto already_traced;
}
- erts_smp_proc_unlock(tracee_p,
+ erts_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
@@ -654,12 +643,12 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
SysTimespec tp;
int i;
- if (sys_get_proc_cputime(start, tp) < 0)
+ if (sys_get_cputime(start, tp) < 0)
goto error;
start = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
(SysCpuTime)tp.tv_nsec;
for (i = 0; i < 100; i++)
- sys_get_proc_cputime(stop, tp);
+ sys_get_cputime(stop, tp);
stop = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
(SysCpuTime)tp.tv_nsec;
if (start == 0) goto error;
@@ -701,11 +690,9 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
mods = 1;
}
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
system_blocked = 1;
-#endif
ok = 1;
if (procs || mods) {
@@ -715,8 +702,8 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
- start_trace(p, tracer, &tracee_p->common, on, mask);
- matches++;
+ if (!start_trace(p, tracer, &tracee_p->common, on, mask))
+ matches++;
}
}
if (ports || mods) {
@@ -730,8 +717,8 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
state = erts_atomic32_read_nob(&tracee_port->state);
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- start_trace(p, tracer, &tracee_port->common, on, mask);
- matches++;
+ if (!start_trace(p, tracer, &tracee_port->common, on, mask))
+ matches++;
}
}
}
@@ -768,12 +755,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
goto error;
}
-#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
ERTS_TRACER_CLEAR(&tracer);
@@ -787,12 +772,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
ERTS_TRACER_CLEAR(&tracer);
-#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
@@ -826,10 +809,129 @@ Eterm trace_info_2(BIF_ALIST_2)
BIF_ERROR(p, BADARG);
}
erts_release_code_write_permission();
+
+ if (is_value(res) && is_internal_ref(res))
+ BIF_TRAP1(erts_await_result, BIF_P, res);
+
BIF_RET(res);
}
static Eterm
+build_trace_flags_term(Eterm **hpp, Uint *szp, Uint trace_flags)
+{
+
+#define ERTS_TFLAG__(F, FN) \
+ if (trace_flags & F) { \
+ if (szp) \
+ sz += 2; \
+ if (hp) { \
+ res = CONS(hp, FN, res); \
+ hp += 2; \
+ } \
+ }
+
+ Eterm res;
+ Uint sz = 0;
+ Eterm *hp;
+
+ if (hpp) {
+ hp = *hpp;
+ res = NIL;
+ }
+ else {
+ hp = NULL;
+ res = THE_NON_VALUE;
+ }
+
+ ERTS_TFLAG__(F_NOW_TS, am_timestamp);
+ ERTS_TFLAG__(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
+ ERTS_TFLAG__(F_MON_TS, am_monotonic_timestamp);
+ ERTS_TFLAG__(F_TRACE_SEND, am_send);
+ ERTS_TFLAG__(F_TRACE_RECEIVE, am_receive);
+ ERTS_TFLAG__(F_TRACE_SOS, am_set_on_spawn);
+ ERTS_TFLAG__(F_TRACE_CALLS, am_call);
+ ERTS_TFLAG__(F_TRACE_PROCS, am_procs);
+ ERTS_TFLAG__(F_TRACE_SOS1, am_set_on_first_spawn);
+ ERTS_TFLAG__(F_TRACE_SOL, am_set_on_link);
+ ERTS_TFLAG__(F_TRACE_SOL1, am_set_on_first_link);
+ ERTS_TFLAG__(F_TRACE_SCHED, am_running);
+ ERTS_TFLAG__(F_TRACE_SCHED_EXIT, am_exiting);
+ ERTS_TFLAG__(F_TRACE_GC, am_garbage_collection);
+ ERTS_TFLAG__(F_TRACE_ARITY_ONLY, am_arity);
+ ERTS_TFLAG__(F_TRACE_RETURN_TO, am_return_to);
+ ERTS_TFLAG__(F_TRACE_SILENT, am_silent);
+ ERTS_TFLAG__(F_TRACE_SCHED_NO, am_scheduler_id);
+ ERTS_TFLAG__(F_TRACE_PORTS, am_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PORTS, am_running_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PROCS, am_running_procs);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ *hpp = hp;
+
+ return res;
+
+#undef ERTS_TFLAG__
+}
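
build_trace_flags_term uses the usual ERTS two-pass build idiom: it is called first with only `szp` set to compute the needed heap size, then with only `hpp` set to write into a heap of exactly that size, so one flag walk serves both passes. A standalone sketch of the idiom over plain strings (invented names):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Pass 1: szp != NULL accumulates the size. Pass 2: hpp != NULL emits. */
static void build(char **hpp, size_t *szp, const char *flags[], int n)
{
    int i;
    for (i = 0; i < n; i++) {
        size_t len = strlen(flags[i]) + 1;   /* item plus separator */
        if (szp)
            *szp += len;
        if (hpp) {
            memcpy(*hpp, flags[i], len - 1);
            (*hpp)[len - 1] = ' ';
            *hpp += len;
        }
    }
}

int main(void)
{
    const char *flags[] = {"send", "receive", "call"};
    size_t sz = 0;
    char *buf, *hp;
    build(NULL, &sz, flags, 3);     /* pass 1: measure */
    buf = hp = malloc(sz + 1);
    build(&hp, NULL, flags, 3);     /* pass 2: emit into exact-size buffer */
    buf[sz] = '\0';
    printf("%s\n", buf);
    free(buf);
    return 0;
}
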
+
+static Eterm
+trace_info_tracee(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErlHeapFragment *bp;
+ Eterm *hp, res, key;
+ Uint sz;
+
+ *redsp = 1;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_undefined;
+
+ key = (Eterm) arg;
+ sz = 3;
+
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p)))
+ erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN,
+ &c_p->common);
+
+ switch (key) {
+ case am_tracer:
+
+ erts_build_tracer_to_term(NULL, NULL, &sz, ERTS_TRACER(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = erts_build_tracer_to_term(&hp, &bp->off_heap,
+ NULL, ERTS_TRACER(c_p));
+ if (res == am_false)
+ res = NIL;
+ break;
+
+ case am_flags:
+
+ build_trace_flags_term(NULL, &sz, ERTS_TRACE_FLAGS(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = build_trace_flags_term(&hp, NULL, ERTS_TRACE_FLAGS(c_p));
+ break;
+
+ default:
+
+ ERTS_INTERNAL_ERROR("Key not supported");
+ res = NIL;
+ bp = NULL;
+ hp = NULL;
+ break;
+ }
+
+ *redsp += 2;
+
+ res = TUPLE2(hp, key, res);
+ *bpp = bp;
+ return res;
+}
+
+static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
@@ -863,24 +965,19 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
erts_port_release(tracee);
} else if (is_internal_pid(pid_spec)) {
- Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCK_MAIN);
-
- if (tracee == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
+ Eterm ref;
- if (!tracee)
- return am_undefined;
+ if (key != am_flags && key != am_tracer)
+ goto error;
- if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
- erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
- &tracee->common);
+ ref = erts_proc_sig_send_rpc_request(p, pid_spec, !0,
+ trace_info_tracee,
+ (void *) key);
- tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
- trace_flags = ERTS_TRACE_FLAGS(tracee);
+ if (is_non_value(ref))
+ return am_undefined;
- if (tracee != p)
- erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
+ return ref;
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -890,48 +987,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
}
if (key == am_flags) {
- int num_flags = 21; /* MAXIMUM number of flags. */
- Uint needed = 3+2*num_flags;
- Eterm flag_list = NIL;
- Eterm* limit;
+ Eterm flag_list;
+ Uint sz = 3;
+ Eterm *hp;
-#define FLAG0(flag_mask,flag) \
- if (trace_flags & (flag_mask)) { flag_list = CONS(hp, flag, flag_list); hp += 2; } else {}
+ build_trace_flags_term(NULL, &sz, trace_flags);
+
+ hp = HAlloc(p, sz);
+
+ flag_list = build_trace_flags_term(&hp, NULL, trace_flags);
-#if defined(DEBUG)
- /*
- * Check num_flags if this assertion fires.
- */
-# define FLAG ASSERT(num_flags-- > 0); FLAG0
-#else
-# define FLAG FLAG0
-#endif
- hp = HAlloc(p, needed);
- limit = hp+needed;
- FLAG(F_NOW_TS, am_timestamp);
- FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
- FLAG(F_MON_TS, am_monotonic_timestamp);
- FLAG(F_TRACE_SEND, am_send);
- FLAG(F_TRACE_RECEIVE, am_receive);
- FLAG(F_TRACE_SOS, am_set_on_spawn);
- FLAG(F_TRACE_CALLS, am_call);
- FLAG(F_TRACE_PROCS, am_procs);
- FLAG(F_TRACE_SOS1, am_set_on_first_spawn);
- FLAG(F_TRACE_SOL, am_set_on_link);
- FLAG(F_TRACE_SOL1, am_set_on_first_link);
- FLAG(F_TRACE_SCHED, am_running);
- FLAG(F_TRACE_SCHED_EXIT, am_exiting);
- FLAG(F_TRACE_GC, am_garbage_collection);
- FLAG(F_TRACE_ARITY_ONLY, am_arity);
- FLAG(F_TRACE_RETURN_TO, am_return_to);
- FLAG(F_TRACE_SILENT, am_silent);
- FLAG(F_TRACE_SCHED_NO, am_scheduler_id);
- FLAG(F_TRACE_PORTS, am_ports);
- FLAG(F_TRACE_SCHED_PORTS, am_running_ports);
- FLAG(F_TRACE_SCHED_PROCS, am_running_procs);
-#undef FLAG0
-#undef FLAG
- HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
if (tracer == am_false)
@@ -975,32 +1040,33 @@ static int function_is_traced(Process *p,
Export e;
Export* ep;
BeamInstr* pc;
+ ErtsCodeInfo *ci;
/* First look for an export entry */
- e.code[0] = mfa[0];
- e.code[1] = mfa[1];
- e.code[2] = mfa[2];
+ e.info.mfa.module = mfa[0];
+ e.info.mfa.function = mfa[1];
+ e.info.mfa.arity = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- pc = ep->code+3;
+ pc = ep->beam;
if (ep->addressv[erts_active_code_ix()] == pc &&
- *pc != (BeamInstr) em_call_error_handler) {
+ ! BeamIsOpCode(*pc, op_call_error_handler)) {
int r = 0;
- ASSERT(*pc == (BeamInstr) em_apply_bif ||
- *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ ASSERT(BeamIsOpCode(*pc, op_apply_bif) ||
+ BeamIsOpCode(*pc, op_i_generic_breakpoint));
- if (erts_is_trace_break(pc, ms, 0)) {
+ if (erts_is_trace_break(&ep->info, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (erts_is_trace_break(pc, ms, 1)) {
+ if (erts_is_trace_break(&ep->info, ms, 1)) {
r |= FUNC_TRACE_LOCAL_TRACE;
}
- if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ if (erts_is_mtrace_break(&ep->info, ms_meta, tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
- if (erts_is_time_break(p, pc, call_time)) {
+ if (erts_is_time_break(p, &ep->info, call_time)) {
r |= FUNC_TRACE_TIME_TRACE;
}
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1008,15 +1074,15 @@ static int function_is_traced(Process *p,
}
/* OK, now look for breakpoint tracing */
- if ((pc = erts_find_local_func(mfa)) != NULL) {
+ if ((ci = erts_find_local_func(&e.info.mfa)) != NULL) {
int r =
- (erts_is_trace_break(pc, ms, 1)
+ (erts_is_trace_break(ci, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(ci, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(pc, count)
+ | (erts_is_count_break(ci, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, pc, call_time)
+ | (erts_is_time_break(p, ci, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1056,21 +1122,20 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
-#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
+ erts_mtx_lock(&erts_dirty_bp_ix_mtx);
+
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
-#ifdef ERTS_SMP
+ erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
switch (r) {
case FUNC_TRACE_NOEXIST:
@@ -1350,7 +1415,7 @@ trace_info_event(Process* p, Eterm event, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
ErtsTracer meta_tracer, int is_blocking)
@@ -1370,22 +1435,23 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
n = finish_bp.e.matched;
for (i = 0; i < n; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ ErtsCodeInfo *ci = fp[i].ci;
+ BeamInstr* pc = erts_codeinfo_to_code(ci);
+ Export* ep = ErtsContainerStruct(ci, Export, info);
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
if (ep->addressv[code_ix] != pc) {
fp[i].mod->curr.num_traced_exports++;
#ifdef DEBUG
- pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+ ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI);
#endif
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
- pc[1] = (BeamInstr) ep->addressv[code_ix];
+ ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
+ ep->beam[1] = (BeamInstr) ep->addressv[code_ix];
}
- erts_set_call_trace_bif(pc, match_prog_set, 0);
+ erts_set_call_trace_bif(ci, match_prog_set, 0);
if (ep->addressv[code_ix] != pc) {
- pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ ep->beam[0] = BeamOpCodeAddr(op_i_generic_breakpoint);
}
} else if (!on && flags.breakpoint) {
/* Turn off breakpoint tracing -- nothing to do here. */
@@ -1394,9 +1460,9 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
* Turn off global tracing, either explicitly or implicitly
* before turning on breakpoint tracing.
*/
- erts_clear_call_trace_bif(pc, 0);
- if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ erts_clear_call_trace_bif(ci, 0);
+ if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) {
+ ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W);
}
}
}
@@ -1406,68 +1472,76 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
*/
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- int j;
-
+
if (!ExportIsBuiltIn(ep)) {
continue;
}
-
+
if (bif_table[i].f == bif_table[i].traced) {
/* Trace wrapper same as regular function - untraceable */
continue;
}
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
- if (! flags.breakpoint) { /* Export entry call trace */
- if (on) {
- erts_clear_call_trace_bif(pc, 1);
- erts_clear_mtrace_bif(pc);
- erts_set_call_trace_bif(pc, match_prog_set, 0);
- } else { /* off */
- erts_clear_call_trace_bif(pc, 0);
- }
- matches++;
- } else { /* Breakpoint call trace */
- int m = 0;
-
- if (on) {
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 0);
- erts_set_call_trace_bif(pc, match_prog_set, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer);
- m = 1;
- }
- if (flags.call_time) {
- erts_set_time_trace_bif(pc, on);
- /* I don't want to remove any other tracers */
- m = 1;
- }
- } else { /* off */
- if (flags.local) {
- erts_clear_call_trace_bif(pc, 1);
- m = 1;
- }
- if (flags.meta) {
- erts_clear_mtrace_bif(pc);
- m = 1;
- }
- if (flags.call_time) {
- erts_clear_time_trace_bif(pc);
- m = 1;
- }
- }
- matches += m;
- }
- }
+ switch (specified) {
+ case 3:
+ if (mfa->arity != ep->info.mfa.arity)
+ continue;
+ case 2:
+ if (mfa->function != ep->info.mfa.function)
+ continue;
+ case 1:
+ if (mfa->module != ep->info.mfa.module)
+ continue;
+ case 0:
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ if (! flags.breakpoint) { /* Export entry call trace */
+ if (on) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ erts_clear_mtrace_bif(&ep->info);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 0);
+ } else { /* off */
+ erts_clear_call_trace_bif(&ep->info, 0);
+ }
+ matches++;
+ } else { /* Breakpoint call trace */
+ int m = 0;
+
+ if (on) {
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 0);
+ erts_set_call_trace_bif(&ep->info, match_prog_set, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_set_mtrace_bif(&ep->info, meta_match_prog_set,
+ meta_tracer);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_set_time_trace_bif(&ep->info, on);
+ /* I don't want to remove any other tracers */
+ m = 1;
+ }
+ } else { /* off */
+ if (flags.local) {
+ erts_clear_call_trace_bif(&ep->info, 1);
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_clear_mtrace_bif(&ep->info);
+ m = 1;
+ }
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(&ep->info);
+ m = 1;
+ }
+ }
+ matches += m;
+ }
}
/*
@@ -1511,17 +1585,13 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
finish_bp.install = on;
finish_bp.local = flags.breakpoint;
-#ifdef ERTS_SMP
if (is_blocking) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-#endif
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
while (erts_finish_breakpointing()) {
/* Empty loop body */
}
-#ifdef ERTS_SMP
finish_bp.current = -1;
}
-#endif
if (flags.breakpoint) {
matches += finish_bp.f.matched;
@@ -1556,11 +1626,6 @@ erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on)
finish_bp.f.matched = 0;
finish_bp.f.matching = NULL;
-#ifndef ERTS_SMP
- while (erts_finish_breakpointing()) {
- /* Empty loop body */
- }
-#endif
return 1;
}
@@ -1579,7 +1644,7 @@ consolidate_event_tracing(ErtsTracingEvent te[])
int
erts_finish_breakpointing(void)
{
- ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ ERTS_LC_ASSERT(erts_has_code_write_permission());
/*
* Memory barriers will be issued for all schedulers *before*
@@ -1669,10 +1734,9 @@ install_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- ep->addressv[code_ix] = pc;
+ ep->addressv[code_ix] = ep->beam;
}
}
@@ -1685,14 +1749,13 @@ uninstall_exp_breakpoints(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] != pc) {
+ if (ep->addressv[code_ix] != ep->beam) {
continue;
}
- ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
- ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
+ ASSERT(BeamIsOpCode(ep->beam[0], op_trace_jump_W));
+ ep->addressv[code_ix] = (BeamInstr *) ep->beam[1];
}
}
@@ -1705,15 +1768,14 @@ clean_export_entries(BpFunctions* f)
Uint i;
for (i = 0; i < ne; i++) {
- BeamInstr* pc = fp[i].pc;
- Export* ep = ErtsContainerStruct(pc, Export, code[3]);
+ Export* ep = ErtsContainerStruct(fp[i].ci, Export, info);
- if (ep->addressv[code_ix] == pc) {
+ if (ep->addressv[code_ix] == ep->beam) {
continue;
}
- if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
- ep->code[3] = (BeamInstr) 0;
- ep->code[4] = (BeamInstr) 0;
+ if (BeamIsOpCode(ep->beam[0], op_trace_jump_W)) {
+ ep->beam[0] = (BeamInstr) 0;
+ ep->beam[1] = (BeamInstr) 0;
}
}
}
@@ -1725,11 +1787,11 @@ setup_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ GenericBp* g = ep->info.u.gen_bp;
if (g) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].traced;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].traced;
}
}
}
@@ -1743,12 +1805,11 @@ reset_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- BeamInstr* pc = ep->code+3;
- GenericBp* g = (GenericBp *) pc[-4];
+ GenericBp* g = ep->info.u.gen_bp;
if (g && g->data[active].flags == 0) {
if (ExportIsBuiltIn(ep)) {
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[i].f;
+ ASSERT(ep->beam[1]);
+ ep->beam[1] = (BeamInstr) bif_table[i].f;
}
}
}
@@ -1828,9 +1889,6 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
return old_value;
}
else if (arg1 == am_label) {
- if (! is_small(arg2)) {
- return THE_NON_VALUE;
- }
new_seq_trace_token(p);
if (build_result) {
old_value = SEQ_TRACE_TOKEN_LABEL(p);
@@ -2004,24 +2062,20 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
}
void erts_system_monitor_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
erts_set_system_monitor(NIL);
erts_system_monitor_long_gc = 0;
erts_system_monitor_long_schedule = 0;
erts_system_monitor_large_heap = 0;
erts_system_monitor_flags.busy_port = 0;
erts_system_monitor_flags.busy_dist_port = 0;
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
@@ -2131,8 +2185,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
int busy_port, busy_dist_port;
system_blocked = 1;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
goto error;
@@ -2171,16 +2225,16 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
erts_system_monitor_flags.busy_port = !!busy_port;
erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
}
error:
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
BIF_ERROR(p, BADARG);
@@ -2189,23 +2243,19 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
/* Begin: Trace for System Profiling */
void erts_system_profile_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
}
-#endif
erts_set_system_profile(NIL);
erts_system_profile_flags.scheduler = 0;
erts_system_profile_flags.runnable_procs = 0;
erts_system_profile_flags.runnable_ports = 0;
erts_system_profile_flags.exclusive = 0;
-#ifdef ERTS_SMP
if (c_p) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
static Eterm system_profile_get(Process *p) {
@@ -2267,8 +2317,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
int scheduler, runnable_procs, runnable_ports, exclusive;
system_blocked = 1;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Check if valid process, no locks are taken */
@@ -2319,8 +2369,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
erts_system_profile_flags.runnable_procs = !!runnable_procs;
erts_system_profile_flags.exclusive = !!exclusive;
erts_system_profile_ts_type = ts;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
@@ -2328,8 +2378,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
error:
if (system_blocked) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
BIF_ERROR(p, BADARG);
@@ -2352,9 +2402,9 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Eterm target;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsTraceDeliveredAll;
static void
@@ -2362,31 +2412,20 @@ reply_trace_delivered_all(void *vtdarp)
{
ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
- if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) {
+ if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) {
Eterm ref_copy, msg;
Process *rp = tdarp->proc;
Eterm *hp = NULL;
ErlOffHeap *ohp;
-#ifdef ERTS_SMP
ErlHeapFragment *bp;
bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref));
hp = &bp->mem[0];
ohp = &bp->off_heap;
-#else
- ErtsProcLocks rp_locks = 0;
- ErtsMessage *mp;
- mp = erts_alloc_message_heap(
- rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp);
-#endif
ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
-#ifdef ERTS_SMP
erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
-#else
- erts_queue_message(rp, rp_locks, mp, msg, am_system);
-#endif
erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
erts_proc_dec_refc(rp);
@@ -2407,7 +2446,7 @@ trace_delivered_1(BIF_ALIST_1)
hp = &tdarp->ref_heap[0];
tdarp->ref = STORE_NC(&hp, NULL, ref);
tdarp->target = BIF_ARG_1;
- erts_smp_atomic32_init_nob(&tdarp->refc,
+ erts_atomic32_init_nob(&tdarp->refc,
(erts_aint32_t) erts_no_schedulers);
erts_proc_add_refc(BIF_P, 1);
erts_schedule_multi_misc_aux_work(0,
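/*
 * Fan-in used by trace_delivered_1() above, reduced to its core
 * (editor's sketch; on_sched_done is a hypothetical name): one aux-work
 * item runs on every scheduler, each completion decrements the shared
 * counter, and the scheduler that takes it to zero builds and sends the
 * {trace_delivered, Tracee, Ref} reply and frees the request.
 */
static void on_sched_done(void *vtdarp)
{
    ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
    if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) {
        /* last scheduler: send the reply, then free tdarp */
    }
}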
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 7c70217d8d..19d46537f9 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,12 +22,15 @@
# include "config.h"
#endif
+#define ERL_BIF_UNIQUE_C__
#include "sys.h"
#include "erl_vm.h"
#include "erl_alloc.h"
#include "export.h"
#include "bif.h"
#include "erl_bif_unique.h"
+#include "hash.h"
+#include "erl_binary.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Reference *
@@ -58,21 +61,32 @@ static union {
static Uint32 max_thr_id;
#endif
+static void init_magic_ref_tables(void);
+
+static Uint64 ref_init_value;
+
static void
init_reference(void)
{
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ ref_init_value = 0;
+ ref_init_value |= (Uint64) tv.tv_sec;
+ ref_init_value |= ((Uint64) tv.tv_usec) << 32;
+ ref_init_value *= (Uint64) 268438039;
+ ref_init_value += (Uint64) tv.tv_usec;
#ifdef DEBUG
max_thr_id = (Uint32) erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers;
max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
#endif
-#endif
- erts_atomic64_init_nob(&global_reference.count, 0);
+ erts_atomic64_init_nob(&global_reference.count,
+ (erts_aint64_t) ref_init_value);
+ init_magic_ref_tables();
}
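/*
 * The seed computed above, written out as a stand-alone expression
 * (editor's sketch): wall-clock seconds fill the low bits, microseconds
 * the high 32 bits, and a multiply-and-add scrambles the result so that
 * reference counters start far apart between emulator restarts.
 */
Uint64 seed = ((Uint64) tv.tv_sec) | (((Uint64) tv.tv_usec) << 32);
seed = seed * (Uint64) 268438039 + (Uint64) tv.tv_usec;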
static ERTS_INLINE void
-global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -82,7 +96,7 @@ global_make_ref_in_array(Uint32 thr_id, Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
static ERTS_INLINE void
-make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
if (esdp)
@@ -92,15 +106,23 @@ make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
}
void
-erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
{
make_ref_in_array(ref);
}
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+void
+erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ make_ref_in_array(ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -110,11 +132,11 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
Eterm erts_make_ref(Process *c_p)
{
Eterm* hp;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
make_ref_in_array(ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
@@ -122,6 +144,274 @@ Eterm erts_make_ref(Process *c_p)
return make_internal_ref(hp);
}
+/*
+ * Magic reference tables
+ */
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+ Uint32 thr_id;
+} ErtsMagicRefTableEntry;
+
+typedef struct {
+ erts_rwmtx_t rwmtx;
+ Hash hash;
+ char name[32];
+} ErtsMagicRefTable;
+
+typedef struct {
+ union {
+ ErtsMagicRefTable table;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMagicRefTable))];
+ } u;
+} ErtsAlignedMagicRefTable;
+
+ErtsAlignedMagicRefTable *magic_ref_table;
+
+ErtsMagicBinary *
+erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicBinary *mb;
+ ErtsMagicRefTable *tblp;
+
+ ASSERT(erts_is_ref_numbers_magic(refn));
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+ if (!tep)
+ mb = NULL;
+ else {
+ erts_aint_t refc;
+ mb = tep->mb;
+ refc = erts_refc_inc_unless(&mb->intern.refc, 0, 0);
+ if (refc == 0)
+ mb = NULL;
+ }
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ return mb;
+}
+
+void
+erts_magic_ref_save_bin__(Eterm ref)
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMRefThing *mrtp;
+ ErtsMagicRefTable *tblp;
+ Uint32 *refn;
+
+ ASSERT(is_internal_magic_ref(ref));
+
+ mrtp = (ErtsMRefThing *) internal_ref_val(ref);
+ refn = mrtp->mb->refn;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (!tep) {
+ ErtsMagicRefTableEntry *used_tep;
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ if (tblp != &magic_ref_table[0].u.table) {
+ tep = erts_alloc(ERTS_ALC_T_MREF_NSCHED_ENT,
+ sizeof(ErtsNSchedMagicRefTableEntry));
+ }
+ else {
+ tep = erts_alloc(ERTS_ALC_T_MREF_ENT,
+ sizeof(ErtsMagicRefTableEntry));
+ tep->thr_id = tmpl.thr_id;
+ }
+
+ tep->value = tmpl.value;
+ tep->mb = mrtp->mb;
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ used_tep = hash_put(&tblp->hash, tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (used_tep != tep) {
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+ }
+}
+
+void
+erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS])
+{
+ ErtsMagicRefTableEntry tmpl;
+ ErtsMagicRefTableEntry *tep;
+ ErtsMagicRefTable *tblp;
+
+ tmpl.value = erts_get_ref_numbers_value(refn);
+ tmpl.thr_id = erts_get_ref_numbers_thr_id(refn);
+
+ if (tmpl.thr_id > erts_no_schedulers)
+ tblp = &magic_ref_table[0].u.table;
+ else
+ tblp = &magic_ref_table[tmpl.thr_id].u.table;
+
+ erts_rwmtx_rlock(&tblp->rwmtx);
+
+ tep = (ErtsMagicRefTableEntry *) hash_get(&tblp->hash, &tmpl);
+
+ erts_rwmtx_runlock(&tblp->rwmtx);
+
+ if (tep) {
+
+ ASSERT(tmpl.value == erts_get_ref_numbers_value(refn));
+ ASSERT(tmpl.thr_id == erts_get_ref_numbers_thr_id(refn));
+
+ erts_rwmtx_rwlock(&tblp->rwmtx);
+
+ tep = hash_remove(&tblp->hash, &tmpl);
+ ASSERT(tep);
+
+ erts_rwmtx_rwunlock(&tblp->rwmtx);
+
+ if (tblp != &magic_ref_table[0].u.table)
+ erts_free(ERTS_ALC_T_MREF_NSCHED_ENT, (void *) tep);
+ else
+ erts_free(ERTS_ALC_T_MREF_ENT, (void *) tep);
+ }
+}
+
+static int nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsNSchedMagicRefTableEntry *e1 = ve1;
+ ErtsNSchedMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value;
+}
+
+static int non_nsched_mreft_cmp(void *ve1, void *ve2)
+{
+ ErtsMagicRefTableEntry *e1 = ve1;
+ ErtsMagicRefTableEntry *e2 = ve2;
+ return e1->value != e2->value || e1->thr_id != e2->thr_id;
+}
+
+static HashValue nsched_mreft_hash(void *ve)
+{
+ ErtsNSchedMagicRefTableEntry *e = ve;
+ return (HashValue) e->value;
+}
+
+static HashValue non_nsched_mreft_hash(void *ve)
+{
+ ErtsMagicRefTableEntry *e = ve;
+ HashValue h;
+ h = (HashValue) e->thr_id;
+ h *= 268440163;
+ h += (HashValue) e->value;
+ return h;
+}
+
+static void *mreft_alloc(void *ve)
+{
+ /*
+ * We allocate the element before
+ * hash_put() and pass it as
+ * template which we get as
+ * input...
+ */
+ return ve;
+}
+
+static void mreft_free(void *ve)
+{
+ /*
+ * We free the element ourselves
+ * after hash_remove()...
+ */
+}
+
+static void *mreft_meta_alloc(int i, size_t size)
+{
+ return erts_alloc(ERTS_ALC_T_MREF_TAB_BKTS, size);
+}
+
+static void mreft_meta_free(int i, void *ptr)
+{
+ erts_free(ERTS_ALC_T_MREF_TAB_BKTS, ptr);
+}
+
+static void
+init_magic_ref_tables(void)
+{
+ HashFunctions hash_funcs;
+ int i;
+ ErtsMagicRefTable *tblp;
+
+ magic_ref_table = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MREF_TAB,
+ (sizeof(ErtsAlignedMagicRefTable)
+ * (erts_no_schedulers + 1)));
+
+ hash_funcs.hash = non_nsched_mreft_hash;
+ hash_funcs.cmp = non_nsched_mreft_cmp;
+
+ hash_funcs.alloc = mreft_alloc;
+ hash_funcs.free = mreft_free;
+ hash_funcs.meta_alloc = mreft_meta_alloc;
+ hash_funcs.meta_free = mreft_meta_free;
+ hash_funcs.meta_print = erts_print;
+
+ tblp = &magic_ref_table[0].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_0");
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
+ hash_funcs.hash = nsched_mreft_hash;
+ hash_funcs.cmp = nsched_mreft_cmp;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsMagicRefTable *tblp = &magic_ref_table[i].u.table;
+ erts_snprintf(&tblp->name[0], sizeof(tblp->name),
+ "magic_ref_table_%d", i);
+ hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ }
+}
+
+void erts_ref_bin_free(ErtsMagicBinary *mb)
+{
+ erts_bin_free((Binary *) mb);
+}
+
+
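/*
 * How the table code above is meant to be used (editor's sketch;
 * publish and resolve are hypothetical wrappers): a magic ref carries
 * ERTS_REF1_MAGIC_MARKER_BIT__ in ref[1]. Before such a ref can leave
 * the emulator, its binary is published so that the bare ref numbers
 * can later be resolved back to the binary, with the refc already
 * bumped by the lookup.
 */
static void publish(Eterm mref)
{
    erts_magic_ref_save_bin__(mref);
}

static ErtsMagicBinary *resolve(Uint32 refn[ERTS_REF_NUMBERS])
{
    return erts_is_ref_numbers_magic(refn)
        ? erts_magic_ref_lookup_bin__(refn)
        : NULL;
}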
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Unique Integer *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -147,10 +437,8 @@ init_unique_integer(void)
{
int bits;
unique_data.r.o.val0_max = (Uint64) erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers;
unique_data.r.o.val0_max += (Uint64) erts_no_dirty_io_schedulers;
-#endif
bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max);
unique_data.r.o.left_shift = bits;
unique_data.r.o.right_shift = 64 - bits;
@@ -498,7 +786,7 @@ void
erts_sched_bif_unique_init(ErtsSchedulerData *esdp)
{
esdp->unique = (Uint64) 0;
- esdp->ref = (Uint64) 0;
+ esdp->ref = (Uint64) ref_init_value;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -511,9 +799,9 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0)
BIF_RETTYPE res;
Eterm* hp;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
- hp = HAlloc(BIF_P, REF_THING_SIZE);
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index c6481864d0..944788c67c 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,16 +21,25 @@
#ifndef ERTS_BIF_UNIQUE_H__
#define ERTS_BIF_UNIQUE_H__
+#include "erl_term.h"
#include "erl_process.h"
#include "big.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
void erts_bif_unique_init(void);
void erts_sched_bif_unique_init(ErtsSchedulerData *esdp);
/* reference */
Eterm erts_make_ref(Process *);
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
-void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+Eterm erts_make_ref_in_buffer(Eterm buffer[ERTS_REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_make_magic_ref_in_array(Uint32 ref[ERTS_REF_NUMBERS]);
+void erts_magic_ref_remove_bin(Uint32 refn[ERTS_REF_NUMBERS]);
+void erts_magic_ref_save_bin__(Eterm ref);
+ErtsMagicBinary *erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS]);
+
/* strict monotonic counter */
@@ -67,19 +76,35 @@ Eterm erts_debug_make_unique_integer(Process *c_p,
Eterm etval1);
-ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS],
+ERTS_GLB_INLINE void erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS],
Uint32 thr_id, Uint64 value);
-ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint32 erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE int erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE Uint64 erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE void erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+ Uint32 ref[ERTS_REF_NUMBERS]);
+ERTS_GLB_INLINE void erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS]);
ERTS_GLB_INLINE Eterm erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE]);
+ Eterm buffer[ERTS_REF_THING_SIZE]);
+ERTS_GLB_INLINE Eterm erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp);
+ERTS_GLB_INLINE Binary *erts_magic_ref2bin(Eterm mref);
+ERTS_GLB_INLINE void erts_magic_ref_save_bin(Eterm ref);
+ERTS_GLB_INLINE ErtsMagicBinary *erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS]);
+
+#define ERTS_REF1_MAGIC_MARKER_BIT_NO__ \
+ (_REF_NUM_SIZE-1)
+#define ERTS_REF1_MAGIC_MARKER_BIT__ \
+ (((Uint32) 1) << ERTS_REF1_MAGIC_MARKER_BIT_NO__)
+#define ERTS_REF1_THR_ID_MASK__ \
+ (ERTS_REF1_MAGIC_MARKER_BIT__-1)
+#define ERTS_REF1_NUM_MASK__ \
+ (~(ERTS_REF1_THR_ID_MASK__|ERTS_REF1_MAGIC_MARKER_BIT__))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 value)
+erts_set_ref_numbers(Uint32 ref[ERTS_REF_NUMBERS], Uint32 thr_id, Uint64 value)
{
/*
* We cannot use thread id in the first 18-bit word since
@@ -87,30 +112,39 @@ erts_set_ref_numbers(Uint32 ref[ERTS_MAX_REF_NUMBERS], Uint32 thr_id, Uint64 val
* we did, we would get really poor hash values. Instead
* we have to shuffle the bits a bit.
*/
- ASSERT(thr_id == (thr_id & ((Uint32) 0x3ffff)));
- ref[0] = (Uint32) (value & ((Uint64) 0x3ffff));
- ref[1] = (((Uint32) (value & ((Uint64) 0xfffc0000)))
- | (thr_id & ((Uint32) 0x3ffff)));
+ ASSERT(thr_id == (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
+ ref[0] = (Uint32) (value & ((Uint64) REF_MASK));
+ ref[1] = (((Uint32) (value & ((Uint64) ERTS_REF1_NUM_MASK__)))
+ | (thr_id & ((Uint32) ERTS_REF1_THR_ID_MASK__)));
ref[2] = (Uint32) ((value >> 32) & ((Uint64) 0xffffffff));
}
ERTS_GLB_INLINE Uint32
-erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_thr_id(Uint32 ref[ERTS_REF_NUMBERS])
{
- return ref[1] & ((Uint32) 0x3ffff);
+ return ref[1] & ((Uint32) ERTS_REF1_THR_ID_MASK__);
+}
+
+ERTS_GLB_INLINE int
+erts_is_ref_numbers_magic(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ return !!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__);
}
ERTS_GLB_INLINE Uint64
-erts_get_ref_numbers_value(Uint32 ref[ERTS_MAX_REF_NUMBERS])
+erts_get_ref_numbers_value(Uint32 ref[ERTS_REF_NUMBERS])
{
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ | REF_MASK) == 0xffffffff);
+ ERTS_CT_ASSERT((ERTS_REF1_NUM_MASK__ & REF_MASK) == 0);
+
return (((((Uint64) ref[2]) & ((Uint64) 0xffffffff)) << 32)
- | (((Uint64) ref[1]) & ((Uint64) 0xfffc0000))
- | (((Uint64) ref[0]) & ((Uint64) 0x3ffff)));
+ | (((Uint64) ref[1]) & ((Uint64) ERTS_REF1_NUM_MASK__))
+ | (((Uint64) ref[0]) & ((Uint64) REF_MASK)));
}
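/*
 * Word layout implied by the masks above (editor's note, assuming
 * ERTS_REF_NUMBERS == 3 and _REF_NUM_SIZE == 18):
 *
 *   ref[0]  bits  0..17  counter bits 0..17 (REF_MASK)
 *   ref[1]  bits  0..16  scheduler thread id
 *           bit      17  magic-ref marker
 *           bits 18..31  counter bits 18..31 (ERTS_REF1_NUM_MASK__)
 *   ref[2]  bits  0..31  counter bits 32..63
 */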
ERTS_GLB_INLINE void
erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
- Uint32 ref[ERTS_MAX_REF_NUMBERS])
+ Uint32 ref[ERTS_REF_NUMBERS])
{
Uint64 value;
@@ -119,18 +153,288 @@ erts_sched_make_ref_in_array(ErtsSchedulerData *esdp,
erts_set_ref_numbers(ref, (Uint32) esdp->thr_id, value);
}
+ERTS_GLB_INLINE void
+erts_sched_make_magic_ref_in_array(ErtsSchedulerData *esdp,
+ Uint32 ref[ERTS_REF_NUMBERS])
+{
+ erts_sched_make_ref_in_array(esdp, ref);
+ ASSERT(!(ref[1] & ERTS_REF1_MAGIC_MARKER_BIT__));
+ ref[1] |= ERTS_REF1_MAGIC_MARKER_BIT__;
+}
+
ERTS_GLB_INLINE Eterm
erts_sched_make_ref_in_buffer(ErtsSchedulerData *esdp,
- Eterm buffer[REF_THING_SIZE])
+ Eterm buffer[ERTS_REF_THING_SIZE])
{
Eterm* hp = buffer;
- Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ Uint32 ref[ERTS_REF_NUMBERS];
erts_sched_make_ref_in_array(esdp, ref);
write_ref_thing(hp, ref[0], ref[1], ref[2]);
return make_internal_ref(hp);
}
+ERTS_GLB_INLINE Eterm
+erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *bp)
+{
+ Eterm *hp = *hpp;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ write_magic_ref_thing(hp, ohp, (ErtsMagicBinary *) bp);
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ erts_refc_inc(&bp->intern.refc, 1);
+ OH_OVERHEAD(ohp, bp->orig_size / sizeof(Eterm));
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_magic_ref2bin(Eterm mref)
+{
+ ErtsMRefThing *mrtp;
+ ASSERT(is_internal_magic_ref(mref));
+ mrtp = (ErtsMRefThing *) internal_ref_val(mref);
+ return (Binary *) mrtp->mb;
+}
+
+/*
+ * Save the magic binary of a ref when the
+ * ref is exposed to the outside world...
+ */
+ERTS_GLB_INLINE void
+erts_magic_ref_save_bin(Eterm ref)
+{
+ if (is_internal_magic_ref(ref))
+ erts_magic_ref_save_bin__(ref);
+}
+
+/*
+ * Look up the magic binary of a magic ref
+ * when the ref comes from the outside world...
+ */
+ERTS_GLB_INLINE ErtsMagicBinary *
+erts_magic_ref_lookup_bin(Uint32 ref[ERTS_REF_NUMBERS])
+{
+ if (!erts_is_ref_numbers_magic(ref))
+ return NULL;
+ return erts_magic_ref_lookup_bin__(ref);
+}
+
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * Storage of internal refs in misc structures...
+ */
+
+#include "erl_message.h"
+
+#if ERTS_REF_NUMBERS != 3
+# error fix this...
+#endif
+
+ERTS_GLB_INLINE int erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_internal_ref_number_cmp(Uint32 num1[ERTS_REF_NUMBERS],
+ Uint32 num2[ERTS_REF_NUMBERS])
+{
+ if (num1[2] != num2[2])
+ return num1[2] > num2[2] ? 1 : -1;
+ if (num1[1] != num2[1])
+ return num1[1] > num2[1] ? 1 : -1;
+ if (num1[0] != num2[0])
+ return num1[0] > num2[0] ? 1 : -1;
+ return 0;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* Iref storage for all internal references... */
+typedef struct {
+ Uint32 is_magic;
+ union {
+ ErtsMagicBinary *mb;
+ Uint32 num[ERTS_REF_NUMBERS];
+ } u;
+} ErtsIRefStorage;
+
+void erts_ref_bin_free(ErtsMagicBinary *mb);
+
+ERTS_GLB_INLINE void erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref);
+ERTS_GLB_INLINE void erts_iref_storage_clean(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Uint erts_iref_storage_heap_size(ErtsIRefStorage *iref);
+ERTS_GLB_INLINE Eterm erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage);
+ERTS_GLB_INLINE int erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref)
+{
+ Eterm *hp;
+
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ref(ref));
+
+ hp = boxed_val(ref);
+
+ if (is_ordinary_ref_thing(hp)) {
+ ErtsORefThing *rtp = (ErtsORefThing *) hp;
+ iref->is_magic = 0;
+ iref->u.num[0] = rtp->num[0];
+ iref->u.num[1] = rtp->num[1];
+ iref->u.num[2] = rtp->num[2];
+ }
+ else {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) hp;
+ ASSERT(is_magic_ref_thing(hp));
+ iref->is_magic = 1;
+ iref->u.mb = mrtp->mb;
+ erts_refc_inc(&mrtp->mb->intern.refc, 1);
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_iref_storage_clean(ErtsIRefStorage *iref)
+{
+ if (iref->is_magic && erts_refc_dectest(&iref->u.mb->intern.refc, 0) == 0)
+ erts_ref_bin_free(iref->u.mb);
+#ifdef DEBUG
+ sys_memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+}
+
+ERTS_GLB_INLINE Uint
+erts_iref_storage_heap_size(ErtsIRefStorage *iref)
+{
+ return iref->is_magic ? ERTS_MAGIC_REF_THING_SIZE : ERTS_REF_THING_SIZE;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_iref_storage_make_ref(ErtsIRefStorage *iref,
+ Eterm **hpp, ErlOffHeap *ohp,
+ int clean_storage)
+{
+ Eterm *hp = *hpp;
+ if (!iref->is_magic) {
+ write_ref_thing(hp, iref->u.num[0], iref->u.num[1],
+ iref->u.num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ }
+ else {
+ write_magic_ref_thing(hp, ohp, iref->u.mb);
+ OH_OVERHEAD(ohp, iref->u.mb->orig_size / sizeof(Eterm));
+ *hpp += ERTS_MAGIC_REF_THING_SIZE;
+ /*
+ * If we clean storage, the term inherits the
+ * refc increment of the cleaned storage...
+ */
+ if (!clean_storage)
+ erts_refc_inc(&iref->u.mb->intern.refc, 1);
+ }
+
+#ifdef DEBUG
+ if (clean_storage)
+ sys_memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
+#endif
+
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_iref_storage_cmp(ErtsIRefStorage *iref1,
+ ErtsIRefStorage *iref2)
+{
+ Uint32 *num1 = iref1->is_magic ? iref1->u.mb->refn : iref1->u.num;
+ Uint32 *num2 = iref2->is_magic ? iref2->u.mb->refn : iref2->u.num;
+ return erts_internal_ref_number_cmp(num1, num2);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
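/*
 * Typical round trip for the storage type above (editor's sketch;
 * restore_ref is a hypothetical helper). erts_iref_storage_save()
 * bumps the magic binary's refc; passing clean_storage = 1 hands that
 * reference over to the rebuilt term, so no extra refc operation is
 * needed here.
 */
static Eterm restore_ref(Process *c_p, ErtsIRefStorage *st)
{
    Eterm *hp = HAlloc(c_p, erts_iref_storage_heap_size(st));
    return erts_iref_storage_make_ref(st, &hp, &MSO(c_p), 1);
}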
+/* OIref storage for ordinary internal references only... */
+typedef struct {
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsOIRefStorage;
+
+ERTS_GLB_INLINE void erts_oiref_storage_save(ErtsOIRefStorage *oiref,
+ Eterm ref);
+ERTS_GLB_INLINE Eterm erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref,
+ Eterm **hpp);
+ERTS_GLB_INLINE int erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_oiref_storage_save(ErtsOIRefStorage *oiref, Eterm ref)
+{
+ ErtsORefThing *rtp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ rtp = (ErtsORefThing *) internal_ref_val(ref);
+
+ oiref->num[0] = rtp->num[0];
+ oiref->num[1] = rtp->num[1];
+ oiref->num[2] = rtp->num[2];
+}
+
+ERTS_GLB_INLINE Eterm
+erts_oiref_storage_make_ref(ErtsOIRefStorage *oiref, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+ ERTS_CT_ASSERT(ERTS_REF_NUMBERS == 3);
+ write_ref_thing(hp, oiref->num[0], oiref->num[1], oiref->num[2]);
+ *hpp += ERTS_REF_THING_SIZE;
+ return make_internal_ref(hp);
+}
+
+ERTS_GLB_INLINE int
+erts_oiref_storage_cmp(ErtsOIRefStorage *oiref1,
+ ErtsOIRefStorage *oiref2)
+{
+ return erts_internal_ref_number_cmp(oiref1->num, oiref2->num);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
#endif /* ERTS_BIF_UNIQUE_H__ */
+
+#if (defined(ERTS_ALLOC_C__) || defined(ERL_BIF_UNIQUE_C__)) \
+ && !defined(ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__)
+#define ERTS_BIF_UNIQUE_H__FIX_ALLOC_TYPES__
+
+#include "hash.h"
+
+typedef struct {
+ HashBucket hash;
+ ErtsMagicBinary *mb;
+ Uint64 value;
+} ErtsNSchedMagicRefTableEntry;
+
+#endif
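/*
 * Editor's note on the trailing block above: ErtsNSchedMagicRefTableEntry
 * is defined outside the normal ERTS_BIF_UNIQUE_H__ guard, under its own
 * ..._FIX_ALLOC_TYPES__ guard, so that erl_alloc.c (which defines
 * ERTS_ALLOC_C__) can see the type's size for the
 * ERTS_ALC_T_MREF_NSCHED_ENT allocations without pulling the whole
 * header in again.
 */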
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index fdc5efea98..08edb43c49 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,34 +18,151 @@
* %CopyrightEnd%
*/
-#ifndef __ERL_BINARY_H
-#define __ERL_BINARY_H
+#ifndef ERL_BINARY_H__TYPES__
+#define ERL_BINARY_H__TYPES__
-#include "erl_threads.h"
-#include "bif.h"
+/*
+** Just like the driver binary but with initial flags
+** Note that the two structures Binary and ErlDrvBinary HAVE to
+** be equal except for extra fields in the beginning of the struct.
+** ErlDrvBinary is defined in erl_driver.h.
+** When driver_alloc_binary is called, a Binary is allocated, but
+** the pointer returned is to the address of the first element that
+** also occurs in the ErlDrvBinary struct (driver.*binary takes care of this).
+** The driver need never know about additions to the internal Binary of the
+** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
+** and Binary, the macros below can convert one type to the other, as they both
+** in reality are equal.
+*/
+
+#ifdef ARCH_32
+ /* *DO NOT USE* only for alignment. */
+#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
+#else
+#define ERTS_BINARY_STRUCT_ALIGNMENT
+#endif
+
+/* Add fields in binary_internals, otherwise the drivers crash */
+struct binary_internals {
+ UWord flags;
+ erts_refc_t refc;
+ ERTS_BINARY_STRUCT_ALIGNMENT
+};
+
+
+typedef struct binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ char orig_bytes[1]; /* to be continued */
+} Binary;
+
+#define ERTS_SIZEOF_Binary(Sz) \
+ (offsetof(Binary,orig_bytes) + (Sz))
+
+#if ERTS_REF_NUMBERS != 3
+#error "Update ErtsMagicBinary"
+#endif
+
+typedef struct magic_binary ErtsMagicBinary;
+struct magic_binary {
+ struct binary_internals intern;
+ SWord orig_size;
+ int (*destructor)(Binary *);
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsAlcType_t alloc_type;
+ union {
+ struct {
+ ERTS_BINARY_STRUCT_ALIGNMENT
+ char data[1];
+ } aligned;
+ struct {
+ char data[1];
+ } unaligned;
+ } u;
+};
+
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - \
+ offsetof(ErtsMagicBinary,u.unaligned.data))
+
+typedef union {
+ Binary binary;
+ ErtsMagicBinary magic_binary;
+ struct {
+ struct binary_internals intern;
+ ErlDrvBinary binary;
+ } driver;
+} ErtsBinary;
/*
- * Maximum number of bytes to place in a heap binary.
+ * 'Binary' alignment:
+ * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
+ * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
+ * 32-bits architectures and 8 bytes on 64-bits architectures.
*/
-#define ERL_ONHEAP_BIN_LIMIT 64
+#define ERTS_MAGIC_BIN_REFN(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.refn
+#define ERTS_MAGIC_BIN_ATYPE(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.alloc_type
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
+ ((ErtsBinary *) (BP))->magic_binary.destructor
+#define ERTS_MAGIC_BIN_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
+#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_DATA_OFFSET)
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.aligned.data)))
+
+/* On 32-bit arch these macro variants will save memory
+ by not forcing 8-byte alignment for the magic payload.
+*/
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
+#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
+
+
+#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
+#define ErlDrvBinary2Binary(D) ((Binary *) \
+ (((char *) (D)) \
+ - offsetof(ErtsBinary, driver.binary)))
+
+/* A "magic" binary flag */
+#define BIN_FLAG_MAGIC 1
+#define BIN_FLAG_DRV 2
+
+#endif /* ERL_BINARY_H__TYPES__ */
+
+#if !defined(ERL_BINARY_H__) && !defined(ERTS_BINARY_TYPES_ONLY__)
+#define ERL_BINARY_H__
+
+#include "erl_threads.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "erl_bits.h"
/*
- * This structure represents a SUB_BINARY.
- *
- * Note: The last field (orig) is not counted in arityval in the header.
- * This simplifies garbage collection.
+ * Maximum number of bytes to place in a heap binary.
*/
-typedef struct erl_sub_bin {
- Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
- Uint size; /* Binary size in bytes. */
- Uint offs; /* Offset into original binary. */
- byte bitsize;
- byte bitoffs;
- byte is_writable; /* The underlying binary is writable */
- Eterm orig; /* Original binary (REFC or HEAP binary). */
-} ErlSubBin;
+#define ERL_ONHEAP_BIN_LIMIT 64
#define ERL_SUB_BIN_SIZE (sizeof(ErlSubBin)/sizeof(Eterm))
#define HEADER_SUB_BIN _make_header(ERL_SUB_BIN_SIZE-2,_TAG_HEADER_SUB_BIN)
@@ -165,6 +282,17 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen);
+typedef union {
+ /*
+ * These two are almost always of
+ * the same size, but when fallback
+ * atomics are used they might
+ * differ in size.
+ */
+ erts_atomic_t smp_atomic_word;
+ erts_atomic_t atomic_word;
+} ErtsMagicIndirectionWord;
+
#if defined(__i386__) || !defined(__GNUC__)
/*
* Doubles aren't required to be 8-byte aligned on intel x86.
@@ -188,11 +316,16 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE void erts_bin_release(Binary *bp);
ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
- void (*destructor)(Binary *),
+ int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned);
ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
- void (*destructor)(Binary *));
+ int (*destructor)(Binary *));
+ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *));
+ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp);
+ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -228,8 +361,6 @@ erts_free_aligned_binary_bytes(byte* buf)
# define CHICKEN_PAD (sizeof(void*) - 1)
#endif
-/* Caller must initialize 'refc'
-*/
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc_fnf(Uint size)
{
@@ -242,13 +373,12 @@ erts_bin_drv_alloc_fnf(Uint size)
ERTS_CHK_BIN_ALIGNMENT(res);
if (res) {
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
}
return res;
}
-/* Caller must initialize 'refc'
-*/
ERTS_GLB_INLINE Binary *
erts_bin_drv_alloc(Uint size)
{
@@ -260,13 +390,11 @@ erts_bin_drv_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
-
-/* Caller must initialize 'refc'
-*/
ERTS_GLB_INLINE Binary *
erts_bin_nrml_alloc(Uint size)
{
@@ -278,7 +406,8 @@ erts_bin_nrml_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = 0;
+ res->intern.flags = 0;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
@@ -287,9 +416,9 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
return NULL;
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -304,9 +433,9 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
erts_realloc_enomem(type, bp, size);
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -320,39 +449,77 @@ erts_bin_realloc(Binary *bp, Uint size)
ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
- if (bp->flags & BIN_FLAG_MAGIC)
- ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp);
- if (bp->flags & BIN_FLAG_DRV)
+ if (bp->intern.flags & BIN_FLAG_MAGIC) {
+ if (!ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp)) {
+ /* Destructor took control of the deallocation */
+ return;
+ }
+ erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
+ erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
+ }
+ else if (bp->intern.flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
else
erts_free(ERTS_ALC_T_BINARY, (void *) bp);
}
+ERTS_GLB_INLINE void
+erts_bin_release(Binary *bp)
+{
+ if (erts_refc_dectest(&bp->intern.refc, 0) == 0) {
+ erts_bin_free(bp);
+ }
+}
+
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary_x(Uint size, void (*destructor)(Binary *),
+erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
+ ErtsAlcType_t alloc_type,
int unaligned)
{
Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size)
: ERTS_MAGIC_BIN_SIZE(size);
- Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ Binary* bptr = erts_alloc_fnf(alloc_type, bsize);
ASSERT(bsize > size);
if (!bptr)
- erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
+ erts_alloc_n_enomem(ERTS_ALC_T2N(alloc_type), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
- bptr->flags = BIN_FLAG_MAGIC;
+ bptr->intern.flags = BIN_FLAG_MAGIC;
bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
: ERTS_MAGIC_BIN_ORIG_SIZE(size);
- erts_refc_init(&bptr->refc, 0);
+ erts_refc_init(&bptr->intern.refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
+ ERTS_MAGIC_BIN_ATYPE(bptr) = alloc_type;
+ erts_make_magic_ref_in_array(ERTS_MAGIC_BIN_REFN(bptr));
return bptr;
}
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+erts_create_magic_binary(Uint size, int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(size, destructor,
+ ERTS_ALC_T_BINARY, 0);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_create_magic_indirection(int (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(sizeof(ErtsMagicIndirectionWord),
+ destructor,
+ ERTS_ALC_T_MINDIRECTION,
+ 1); /* Not 64-bit aligned,
+ but word aligned */
+}
+
+ERTS_GLB_INLINE erts_atomic_t *
+erts_binary_to_magic_indirection(Binary *bp)
{
- return erts_create_magic_binary_x(size, destructor, 0);
+ ErtsMagicIndirectionWord *mip;
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
+ mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
+ return &mip->smp_atomic_word;
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* !__ERL_BINARY_H */
+#endif /* !ERL_BINARY_H__ */
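/*
 * Using the revised magic-binary API above (editor's sketch; MyState
 * and my_destructor are placeholders). Destructors now return int:
 * non-zero lets erts_bin_free() deallocate the binary as before, while
 * returning 0 means the destructor has taken over responsibility for
 * the memory.
 */
typedef struct { int fd; } MyState;

static int my_destructor(Binary *bp)
{
    /* release whatever lives in ERTS_MAGIC_BIN_DATA(bp)... */
    return 1;                 /* let erts_bin_free() free bp itself */
}

static Binary *make_state_bin(void)
{
    Binary *bp = erts_create_magic_binary(sizeof(MyState), my_destructor);
    erts_refc_inc(&bp->intern.refc, 1);  /* refc starts at 0 */
    return bp;                /* drop later with erts_bin_release(bp) */
}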
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 6bf52fb303..3a16913473 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,15 +32,6 @@
#include "erl_bits.h"
#include "erl_binary.h"
-#ifdef MAX
-#undef MAX
-#endif
-#define MAX(x,y) (((x)>(y))?(x):(y))
-#ifdef MIN
-#undef MIN
-#endif
-#define MIN(x,y) (((x)<(y))?(x):(y))
-
#if defined(WORDS_BIGENDIAN)
# define BIT_ENDIAN_MACHINE 0
#else
@@ -64,30 +55,19 @@
static byte get_bit(byte b, size_t a_offs);
-#if defined(ERTS_SMP)
/* the state resides in the current process' scheduler data */
-#elif defined(ERL_BITS_REENTRANT)
-/* reentrant API but with a hidden single global state, for testing only */
-struct erl_bits_state ErlBitsState_;
-#else
-/* non-reentrant API with a single global state */
-struct erl_bits_state ErlBitsState;
-#endif
#define byte_buf (ErlBitsState.byte_buf_)
#define byte_buf_len (ErlBitsState.byte_buf_len_)
-static erts_smp_atomic_t bits_bufs_size;
+static erts_atomic_t bits_bufs_size;
Uint
erts_bits_bufs_size(void)
{
- return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size);
+ return (Uint) erts_atomic_read_nob(&bits_bufs_size);
}
-#if !defined(ERTS_SMP)
-static
-#endif
void
erts_bits_init_state(ERL_BITS_PROTO_0)
{
@@ -97,32 +77,22 @@ erts_bits_init_state(ERL_BITS_PROTO_0)
erts_bin_offset = 0;
}
-#if defined(ERTS_SMP)
void
erts_bits_destroy_state(ERL_BITS_PROTO_0)
{
erts_free(ERTS_ALC_T_BITS_BUF, byte_buf);
}
-#endif
void
erts_init_bits(void)
{
ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0);
ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0);
- ERTS_CT_ASSERT(ERTS_MAGIC_BIN_BYTES_TO_ALIGN ==
- (offsetof(ErtsMagicBinary,u.aligned.data)
- - offsetof(ErtsMagicBinary,u.unaligned.data)));
ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes)
== offsetof(Binary,orig_bytes));
- erts_smp_atomic_init_nob(&bits_bufs_size, 0);
-#if defined(ERTS_SMP)
+ erts_atomic_init_nob(&bits_bufs_size, 0);
/* erl_process.c calls erts_bits_init_state() on all state instances */
-#else
- ERL_BITS_DECLARE_STATEP;
- erts_bits_init_state(ERL_BITS_ARGS_0);
-#endif
}
/*****************************************************************
@@ -756,7 +726,7 @@ static void
ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need))
{
if (byte_buf_len < need) {
- erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len);
+ erts_atomic_add_nob(&bits_bufs_size, need - byte_buf_len);
byte_buf_len = need;
byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len);
}
@@ -1324,7 +1294,14 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
used_size_in_bits = erts_bin_offset + build_size_in_bits;
+
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
pb->flags |= PB_ACTIVE_WRITER;
@@ -1398,16 +1375,27 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
goto badarg;
}
}
- used_size_in_bits = erts_bin_offset + build_size_in_bits;
- used_size_in_bytes = NBYTES(used_size_in_bits);
- bin_size = 2*used_size_in_bytes;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ c_p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ used_size_in_bytes = NBYTES(used_size_in_bits);
+
+ if(used_size_in_bits < (ERTS_UINT_MAX / 2)) {
+ bin_size = 2 * used_size_in_bytes;
+ } else {
+ bin_size = NBYTES(ERTS_UINT_MAX);
+ }
+
bin_size = (bin_size < 256) ? 256 : bin_size;
/*
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -1491,6 +1479,12 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* Calculate new size in bytes.
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+
+ if((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+
pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
pb->size = (pos_in_bits_after_build+7) >> 3;
pb->flags |= PB_ACTIVE_WRITER;
@@ -1521,14 +1515,11 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* binary and copy the contents of the old binary into it.
*/
Binary* bptr = erts_bin_nrml_alloc(new_size);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
pb->val = bptr;
pb->bytes = (byte *) bptr->orig_bytes;
- if (erts_refc_dectest(&binp->refc, 0) == 0) {
- erts_bin_free(binp);
- }
+ erts_bin_release(binp);
}
}
erts_current_bin = pb->bytes;
@@ -1568,7 +1559,6 @@ erts_bs_init_writable(Process* p, Eterm sz)
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
/*
* Now allocate the ProcBin on the heap.
@@ -1644,7 +1634,7 @@ erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb)
return LSB[0] | (LSB[1]<<8) | (LSB[2]<<16) | (LSB[3]<<24);
}
-void
+static void
erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf)
{
Uint bits = mb->size - mb->offset;
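/*
 * The guard added in three places in erl_bits.c above, in isolation
 * (editor's note): the new size erts_bin_offset + build_size_in_bits is
 * only formed after checking, via a subtraction that cannot wrap, that
 * the addition itself cannot overflow; otherwise the build fails with
 * SYSTEM_LIMIT instead of silently truncating.
 */
if ((ERTS_UINT_MAX - build_size_in_bits) < erts_bin_offset) {
    p->freason = SYSTEM_LIMIT;
    return THE_NON_VALUE;
}
used_size_in_bits = erts_bin_offset + build_size_in_bits;  /* safe now */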
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 4bd5b24157..7beef5cfda 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +22,23 @@
#define __ERL_BITS_H__
/*
+ * This structure represents a SUB_BINARY.
+ *
+ * Note: The last field (orig) is not counted in arityval in the header.
+ * This simplifies garbage collection.
+ */
+
+typedef struct erl_sub_bin {
+ Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Uint offs; /* Offset into original binary. */
+ byte bitsize;
+ byte bitoffs;
+ byte is_writable; /* The underlying binary is writable */
+ Eterm orig; /* Original binary (REFC or HEAP binary). */
+} ErlSubBin;
+
+/*
* This structure represents a binary to be matched.
*/
@@ -67,31 +84,14 @@ typedef struct erl_bin_match_struct{
#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb)
-#if defined(ERTS_SMP)
-#define ERL_BITS_REENTRANT
-#else
-/* uncomment to test the reentrant API in the non-SMP runtime system */
-/* #define ERL_BITS_REENTRANT */
-#endif
-
-#ifdef ERL_BITS_REENTRANT
-
/*
* Reentrant API with the state passed as a parameter.
* (Except when the current Process* already is a parameter.)
*/
-#ifdef ERTS_SMP
/* the state resides in the current process' scheduler data */
#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0)
#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state
-#else
-/* reentrant API but with a hidden single global state, for testing only */
-extern struct erl_bits_state ErlBitsState_;
-#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP
-#endif
#define ErlBitsState (*EBS)
#define ERL_BITS_PROTO_0 struct erl_bits_state *EBS
@@ -103,26 +103,6 @@ extern struct erl_bits_state ErlBitsState_;
#define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2
#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3
-#else /* ERL_BITS_REENTRANT */
-
-/*
- * Non-reentrant API with a single global state.
- */
-extern struct erl_bits_state ErlBitsState;
-#define ERL_BITS_DECLARE_STATEP /*empty*/
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) /*empty*/
-
-#define ERL_BITS_PROTO_0 void
-#define ERL_BITS_PROTO_1(PARM1) PARM1
-#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2
-#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3
-#define ERL_BITS_ARGS_0 /*empty*/
-#define ERL_BITS_ARGS_1(ARG1) ARG1
-#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2
-#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3
-
-#endif /* ERL_BITS_REENTRANT */
#define erts_bin_offset (ErlBitsState.erts_bin_offset_)
#define erts_current_bin (ErlBitsState.erts_current_bin_)
@@ -131,7 +111,7 @@ extern struct erl_bits_state ErlBitsState;
#define copy_binary_to_buffer(DstBuffer, DstBufOffset, SrcBuffer, SrcBufferOffset, NumBits) \
do { \
if (BIT_OFFSET(DstBufOffset) == 0 && (SrcBufferOffset == 0) && \
- (BIT_OFFSET(NumBits)==0)) { \
+ (BIT_OFFSET(NumBits)==0) && (NumBits != 0)) { \
sys_memcpy(DstBuffer+BYTE_OFFSET(DstBufOffset), \
SrcBuffer, NBYTES(NumBits)); \
} else { \
@@ -141,10 +121,8 @@ extern struct erl_bits_state ErlBitsState;
} while (0)
void erts_init_bits(void); /* Initialization once. */
-#ifdef ERTS_SMP
void erts_bits_init_state(ERL_BITS_PROTO_0);
void erts_bits_destroy_state(ERL_BITS_PROTO_0);
-#endif
/*
@@ -185,7 +163,6 @@ void erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes));
Uint erts_bits_bufs_size(void);
Uint32 erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb);
-void erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf);
Eterm erts_bs_get_utf8(ErlBinMatchBuffer* mb);
Eterm erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags);
Eterm erts_bs_append(Process* p, Eterm* reg, Uint live, Eterm build_size_term,
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 28aaeeb479..6f8d2f8c35 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -60,7 +60,7 @@ static int max_main_threads;
static int reader_groups;
static ErtsCpuBindData *scheduler2cpu_map;
-static erts_smp_rwmtx_t cpuinfo_rwmtx;
+static erts_rwmtx_t cpuinfo_rwmtx;
typedef enum {
ERTS_CPU_BIND_UNDEFINED,
@@ -131,13 +131,11 @@ static erts_cpu_groups_map_t *reader_groups_map;
#define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff)
-#ifdef ERTS_SMP
static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
int size,
ErtsCpuBindOrder bind_order,
int mk_seq);
static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size);
-#endif
static void reader_groups_callback(int, ErtsSchedulerData *, int, void *);
static erts_cpu_groups_map_t *add_cpu_groups(int groups,
@@ -434,7 +432,6 @@ processor_order_cmp(const void *vx, const void *vy)
return 0;
}
-#ifdef ERTS_SMP
void
erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
{
@@ -444,7 +441,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
int cgcc_ix;
/* Unbind from cpu */
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
if (scheduler2cpu_map[esdp->no].bound_id >= 0
&& erts_unbind_from_cpu(cpuinfo) == 0) {
esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
@@ -463,7 +460,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
}
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(1,
@@ -481,7 +478,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
void
erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue));
if (esdp->no <= max_main_threads)
erts_thr_set_main_status(1, (int) esdp->no);
@@ -490,7 +487,6 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
(void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
}
-#endif
void
erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
@@ -499,8 +495,8 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_map_t *cgm;
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_runq_unlock(esdp->run_queue);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
cpu_id = scheduler2cpu_map[esdp->no].bind_id;
if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) {
res = erts_bind_to_cpu(cpuinfo, cpu_id);
@@ -543,7 +539,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(0,
@@ -553,10 +549,9 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_free(ERTS_ALC_T_TMP, cgcc);
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
}
-#ifdef ERTS_SMP
void
erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
{
@@ -565,7 +560,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
cgcc = erts_alloc(ERTS_ALC_T_TMP,
(no_cpu_groups_callbacks
@@ -581,7 +576,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
}
ASSERT(no_cpu_groups_callbacks == cgcc_ix);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++)
cgcc[cgcc_ix].callback(0,
@@ -594,7 +589,6 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
if (esdp->no <= max_main_threads)
erts_thr_set_main_status(1, (int) esdp->no);
}
-#endif
static void
write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
@@ -602,13 +596,13 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
int s_ix = 1;
int cpu_ix;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (cpu_bind_order != ERTS_CPU_BIND_NONE && size) {
cpu_bind_order_sort(cpudata, size, cpu_bind_order, 1);
- for (cpu_ix = 0; cpu_ix < size && cpu_ix < erts_no_schedulers; cpu_ix++)
+ for (cpu_ix = 0; cpu_ix < size && s_ix <= erts_no_schedulers; cpu_ix++)
if (erts_is_cpu_available(cpuinfo, cpudata[cpu_ix].logical))
scheduler2cpu_map[s_ix++].bind_id = cpudata[cpu_ix].logical;
}
@@ -702,9 +696,9 @@ Eterm
erts_bound_schedulers_term(Process *c_p)
{
ErtsCpuBindOrder order;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
order = cpu_bind_order;
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return bound_schedulers_term(order);
}
@@ -717,7 +711,7 @@ erts_bind_schedulers(Process *c_p, Eterm how)
int cpudata_size;
ErtsCpuBindOrder old_cpu_bind_order;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) {
if (cpu_bind_order == ERTS_CPU_BIND_NONE
@@ -773,7 +767,7 @@ erts_bind_schedulers(Process *c_p, Eterm how)
done:
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
if (notify)
erts_sched_notify_check_cpu_bind();
@@ -793,9 +787,9 @@ erts_sched_bind_atthrcreate_child(int unbind)
{
int res = 0;
if (unbind) {
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
res = erts_unbind_from_cpu(cpuinfo);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
return res;
}
@@ -812,7 +806,7 @@ erts_sched_bind_atfork_prepare(void)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
int unbind = esdp != NULL && erts_is_scheduler_bound(esdp);
if (unbind)
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
return unbind;
}
@@ -820,29 +814,18 @@ int
erts_sched_bind_atfork_child(int unbind)
{
if (unbind) {
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
return erts_unbind_from_cpu(cpuinfo);
}
return 0;
}
-char *
-erts_sched_bind_atvfork_child(int unbind)
-{
- if (unbind) {
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
- || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
- return erts_get_unbind_from_cpu_str(cpuinfo);
- }
- return "false";
-}
-
void
erts_sched_bind_atfork_parent(int unbind)
{
if (unbind)
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
Eterm
@@ -876,9 +859,9 @@ erts_fake_scheduler_bindings(Process *p, Eterm how)
return res;
}
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE)
ERTS_BIF_PREP_RET(res, am_false);
@@ -941,12 +924,12 @@ erts_get_schedulers_binds(Process *c_p)
Eterm res = make_tuple(hp);
*(hp++) = make_arityval(erts_no_schedulers);
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
for (ix = 1; ix <= erts_no_schedulers; ix++)
*(hp++) = (scheduler2cpu_map[ix].bound_id >= 0
? make_small(scheduler2cpu_map[ix].bound_id)
: AM_unbound);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -1357,7 +1340,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
int cpudata_size = 0;
Eterm res;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY);
if (term == am_undefined) {
if (user_cpudata)
@@ -1378,7 +1361,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
}
else if (is_not_list(term)) {
error:
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
res = THE_NON_VALUE;
goto done;
}
@@ -1472,7 +1455,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term)
write_schedulers_bind_change(cpudata, cpudata_size);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
erts_sched_notify_check_cpu_bind();
done:
@@ -1626,7 +1609,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which)
{
Eterm res;
int type;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
if (ERTS_IS_ATOM_STR("used", which))
type = ERTS_GET_USED_CPU_TOPOLOGY;
else if (ERTS_IS_ATOM_STR("detected", which))
@@ -1639,7 +1622,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which)
res = THE_NON_VALUE;
else
res = get_cpu_topology_term(c_p, type);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -1657,9 +1640,9 @@ get_logical_processors(int *conf, int *onln, int *avail)
void
erts_get_logical_processors(int *conf, int *onln, int *avail)
{
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
get_logical_processors(conf, onln, avail);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
}
void
@@ -1717,8 +1700,9 @@ erts_init_cpu_topology(void)
{
int ix;
- erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info");
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
(sizeof(ErtsCpuBindData)
@@ -1736,13 +1720,13 @@ erts_init_cpu_topology(void)
NULL);
if (cpu_bind_order == ERTS_CPU_BIND_NONE)
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
else {
erts_cpu_topology_t *cpudata;
int cpudata_size;
create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
write_schedulers_bind_change(cpudata, cpudata_size);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
erts_sched_notify_check_cpu_bind();
destroy_tmp_cpu_topology_copy(cpudata);
}
@@ -1752,7 +1736,7 @@ int
erts_update_cpu_info(void)
{
int changed;
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwlock(&cpuinfo_rwmtx);
changed = erts_cpu_info_update(cpuinfo);
if (changed) {
erts_cpu_topology_t *cpudata;
@@ -1785,7 +1769,7 @@ erts_update_cpu_info(void)
write_schedulers_bind_change(cpudata, cpudata_size);
destroy_tmp_cpu_topology_copy(cpudata);
}
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rwunlock(&cpuinfo_rwmtx);
if (changed)
erts_sched_notify_check_cpu_bind();
return changed;
@@ -1802,7 +1786,7 @@ reader_groups_callback(int suspending,
void *unused)
{
if (reader_groups && esdp->no <= max_main_threads)
- erts_smp_rwmtx_set_reader_group(suspending ? 0 : group+1);
+ erts_rwmtx_set_reader_group(suspending ? 0 : group+1);
}
static Eterm get_cpu_groups_map(Process *c_p,
@@ -1831,9 +1815,9 @@ Eterm
erts_get_reader_groups_map(Process *c_p)
{
Eterm res;
- erts_smp_rwmtx_rlock(&cpuinfo_rwmtx);
+ erts_rwmtx_rlock(&cpuinfo_rwmtx);
res = get_cpu_groups_map(c_p, reader_groups_map, 1);
- erts_smp_rwmtx_runlock(&cpuinfo_rwmtx);
+ erts_rwmtx_runlock(&cpuinfo_rwmtx);
return res;
}
@@ -2213,7 +2197,7 @@ add_cpu_groups(int groups,
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_map_t *cgm;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (use_groups > max_main_threads)
use_groups = max_main_threads;
@@ -2254,52 +2238,13 @@ add_cpu_groups(int groups,
return cgm;
}
-static void
-remove_cpu_groups(erts_cpu_groups_callback_t callback, void *arg)
-{
- erts_cpu_groups_map_t *prev_cgm, *cgm;
- erts_cpu_groups_callback_list_t *prev_cgcl, *cgcl;
-
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
-
- no_cpu_groups_callbacks--;
-
- prev_cgm = NULL;
- for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) {
- prev_cgcl = NULL;
- for (cgcl = cgm->callback_list; cgcl; cgcl = cgcl->next) {
- if (cgcl->callback == callback && cgcl->arg == arg) {
- if (prev_cgcl)
- prev_cgcl->next = cgcl->next;
- else
- cgm->callback_list = cgcl->next;
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgcl);
- if (!cgm->callback_list) {
- if (prev_cgm)
- prev_cgm->next = cgm->next;
- else
- cpu_groups_maps = cgm->next;
- if (cgm->array)
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm->array);
- erts_free(ERTS_ALC_T_CPU_GRPS_MAP, cgm);
- }
- return;
- }
- prev_cgcl = cgcl;
- }
- prev_cgm = cgm;
- }
-
- erts_exit(ERTS_ABORT_EXIT, "Cpu groups not found\n");
-}
-
static int
cpu_groups_lookup(erts_cpu_groups_map_t *map,
ErtsSchedulerData *esdp)
{
int start, logical, ix;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx)
|| erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
if (esdp->cpu_id < 0)
@@ -2327,26 +2272,8 @@ static void
update_cpu_groups_maps(void)
{
erts_cpu_groups_map_t *cgm;
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx));
for (cgm = cpu_groups_maps; cgm; cgm = cgm->next)
make_cpu_groups_map(cgm, 0);
}
-
-void
-erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- add_cpu_groups(groups, callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
-
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg)
-{
- erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
- remove_cpu_groups(callback, arg);
- erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx);
-}
diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h
index 45324ac4a0..88bcad79ab 100644
--- a/erts/emulator/beam/erl_cpu_topology.h
+++ b/erts/emulator/beam/erl_cpu_topology.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -60,11 +60,9 @@ int erts_init_scheduler_bind_type_string(char *how);
int erts_init_cpu_topology_string(char *topology_str);
void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp);
-#ifdef ERTS_SMP
void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp);
-#endif
int erts_update_cpu_info(void);
@@ -85,22 +83,14 @@ void erts_sched_bind_atthrcreate_parent(int unbind);
int erts_sched_bind_atfork_prepare(void);
int erts_sched_bind_atfork_child(int unbind);
-char *erts_sched_bind_atvfork_child(int unbind);
void erts_sched_bind_atfork_parent(int unbind);
Eterm erts_fake_scheduler_bindings(Process *p, Eterm how);
Eterm erts_debug_cpu_groups_map(Process *c_p, int groups);
-
typedef void (*erts_cpu_groups_callback_t)(int,
ErtsSchedulerData *,
int,
void *);
-void erts_add_cpu_groups(int groups,
- erts_cpu_groups_callback_t callback,
- void *arg);
-void erts_remove_cpu_groups(erts_cpu_groups_callback_t callback,
- void *arg);
-
#endif
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 128a7b3865..c009a3bde8 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,9 +19,7 @@
*/
/*
- * This file contains the bif interface functions and
- * the handling of the "meta tables" ie the tables of
- * db tables.
+ * This file contains the 'ets' bif interface functions.
*/
/*
@@ -43,14 +41,43 @@
#include "erl_db.h"
#include "bif.h"
#include "big.h"
+#include "erl_binary.h"
-erts_smp_atomic_t erts_ets_misc_mem_size;
+erts_atomic_t erts_ets_misc_mem_size;
/*
** Utility macros
*/
+#define DB_BIF_GET_TABLE(TB, WHAT, KIND, BIF_IX) \
+ DB_GET_TABLE(TB, BIF_ARG_1, WHAT, KIND, BIF_IX, NULL, BIF_P)
+
+#define DB_TRAP_GET_TABLE(TB, TID, WHAT, KIND, BIF_EXP) \
+ DB_GET_TABLE(TB, TID, WHAT, KIND, 0, BIF_EXP, BIF_P)
+
+#define DB_GET_TABLE(TB, TID, WHAT, KIND, BIF_IX, BIF_EXP, PROC) \
+do { \
+ Uint freason__; \
+ if (!(TB = db_get_table(PROC, TID, WHAT, KIND, &freason__))) { \
+ return db_bif_fail(PROC, freason__, BIF_IX, BIF_EXP); \
+ } \
+}while(0)
+
+static BIF_RETTYPE db_bif_fail(Process* p, Uint freason,
+ Uint bif_ix, Export* bif_exp)
+{
+ if (freason == TRAP) {
+ if (!bif_exp)
+ bif_exp = bif_export[bif_ix];
+ p->arity = bif_exp->info.mfa.arity;
+ p->i = (BeamInstr*) bif_exp->addressv[erts_active_code_ix()];
+ }
+ p->freason = freason;
+ return THE_NON_VALUE;
+}
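For orientation, db_bif_fail() above funnels both failure modes of the table-lookup macros through one exit: BADARG propagates as an ordinary error, while TRAP restores the process's arity and instruction pointer so the scheduler re-enters the same BIF after the yield. A compact sketch of the convention, using assumed plain-C stand-ins for Process, Export and THE_NON_VALUE:

    enum { R_BADARG = 1, R_TRAP = 2 };       /* stand-ins for BADARG/TRAP */

    typedef struct { int freason; int arity; void (*i)(void); } Proc;

    static void bif_entry(void) { }          /* where the BIF is re-entered */

    static int bif_fail(Proc *p, int freason)
    {
        if (freason == R_TRAP) {
            p->arity = 2;       /* restore the call frame's arity...         */
            p->i = bif_entry;   /* ...and point the process at the BIF entry */
        }
        p->freason = freason;   /* R_BADARG just records the error reason    */
        return -1;              /* THE_NON_VALUE stand-in                    */
    }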
+
+
/* Get a key from any table structure and a tagged object */
#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
@@ -62,85 +89,244 @@ enum DbIterSafety {
ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
ITER_SAFE /* No need to fixate at all */
};
-#ifdef ERTS_SMP
# define ITERATION_SAFETY(Proc,Tab) \
((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
-#else
-# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \
- ? ITER_SAFE : ITER_SAFE_LOCKED)
-#endif
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
+/*
+ * "fixed_tabs": list of all fixed tables for a process
+ */
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix);
+#endif
-/*
-** The main meta table, containing all ets tables.
-*/
-#ifdef ERTS_SMP
+static void fixed_tabs_insert(Process* p, DbFixation* fix)
+{
+ DbFixation* first = erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
+
+ if (!first) {
+ fix->tabs.next = fix->tabs.prev = fix;
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix);
+ }
+ else {
+ ASSERT(!fixed_tabs_find(first, fix));
+ fix->tabs.prev = first->tabs.prev;
+ fix->tabs.next = first;
+ fix->tabs.prev->tabs.next = fix;
+ first->tabs.prev = fix;
+ }
+}
+
+static void fixed_tabs_delete(Process *p, DbFixation* fix)
+{
+ if (fix->tabs.next == fix) {
+ DbFixation* old;
+ ASSERT(fix->tabs.prev == fix);
+ old = erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, NULL);
+ ASSERT(old == fix); (void)old;
+ }
+ else {
+ DbFixation *first = (DbFixation*) erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
-#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
-#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
-#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+ ASSERT(fixed_tabs_find(first, fix));
+ fix->tabs.prev->tabs.next = fix->tabs.next;
+ fix->tabs.next->tabs.prev = fix->tabs.prev;
-typedef union {
- erts_smp_rwmtx_t rwmtx;
- byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
- sizeof(erts_smp_rwmtx_t))];
-} erts_meta_main_tab_lock_t;
+ if (fix == first)
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix->tabs.next);
+ }
+}
-static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix)
+{
+ DbFixation* p;
+ if (!first) {
+ first = (DbFixation*) erts_psd_get(fix->procs.p, ERTS_PSD_ETS_FIXED_TABLES);
+ }
+ p = first;
+ do {
+ if (p == fix)
+ return 1;
+ ASSERT(p->procs.p == fix->procs.p);
+ ASSERT(p->tabs.next->tabs.prev == p);
+ p = p->tabs.next;
+ } while (p != first);
+ return 0;
+}
#endif
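fixed_tabs_insert/delete above maintain an intrusive circular doubly-linked list rooted in a per-process PSD slot, so insertion and deletion are O(1) with no separate list nodes. A self-contained sketch of the same shape, where head plays the role of the ERTS_PSD_ETS_FIXED_TABLES slot:

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next, *prev; };

    static void list_insert(struct node **head, struct node *n)
    {
        if (!*head) {                     /* first element links to itself */
            n->next = n->prev = n;
            *head = n;
        } else {                          /* splice in before the head */
            n->prev = (*head)->prev;
            n->next = *head;
            n->prev->next = n;
            (*head)->prev = n;
        }
    }

    static void list_delete(struct node **head, struct node *n)
    {
        if (n->next == n) {               /* last element: empty the list */
            assert(n->prev == n && *head == n);
            *head = NULL;
        } else {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            if (*head == n)               /* keep the root valid */
                *head = n->next;
        }
    }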
-static struct {
- union {
- DbTable *tb; /* Only directly readable if slot is ALIVE */
- UWord next_free; /* (index<<2)|1 if slot is FREE */
- }u;
-} *meta_main_tab;
-/* A slot in meta_main_tab can have three states:
- * FREE : Free to use for new table. Part of linked free-list.
- * ALIVE: Contains a table
- * DEAD : Contains a table that is being removed.
+
+/*
+ * fixing_procs: tree of all processes fixating a table
*/
-#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1)
-#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2)
-#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2)))
-#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2)
-#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1)
-#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
-#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
-
-static ERTS_INLINE erts_smp_rwmtx_t *
-get_meta_main_tab_lock(unsigned slot)
-{
-#ifdef ERTS_SMP
- return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+#define ERTS_RBT_PREFIX fixing_procs
+#define ERTS_RBT_T DbFixation
+#define ERTS_RBT_KEY_T Process*
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->procs.parent = NULL; \
+ (T)->procs.right = NULL; \
+ (T)->procs.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->procs.is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!(T)->procs.is_red)
+#define ERTS_RBT_SET_BLACK(T) ((T)->procs.is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->procs.is_red = (F))
+#define ERTS_RBT_GET_PARENT(T) ((T)->procs.parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->procs.parent = (P))
+#define ERTS_RBT_GET_RIGHT(T) ((T)->procs.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->procs.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->procs.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->procs.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->procs.p)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#ifdef DEBUG
+# define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
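The ERTS_RBT_* macro block above parameterizes erl_rbtree.h, which expands into an intrusive red-black tree specialized for DbFixation and keyed on Process*. With ERTS_RBT_PREFIX set to fixing_procs, the include generates prefix-named entry points; a hypothetical usage sketch (the exact generated signatures are assumptions inferred from the macro scheme):

    /*
     *   fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
     *   fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
     *   fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
     *   fixing_procs_rbt_foreach(tb->common.fixing_procs, fn, arg);
     */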
+#ifdef HARDDEBUG
+# error Do something useful with CHECK_TABLES maybe
#else
- return NULL;
+# define CHECK_TABLES()
#endif
+
+
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data);
+static void schedule_free_dbtable(DbTable* tb);
+static void delete_sched_table(Process *c_p, DbTable *tb);
+
+static void table_dec_refc(DbTable *tb, erts_aint_t min_val)
+{
+ if (erts_refc_dectest(&tb->common.refc, min_val) == 0)
+ schedule_free_dbtable(tb);
}
-static erts_smp_spinlock_t meta_main_tab_main_lock;
-static Uint meta_main_tab_first_free; /* Index of first free slot */
-static int meta_main_tab_cnt; /* Number of active tables */
-static int meta_main_tab_top; /* Highest ever used slot + 1 */
-static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
-static Uint meta_main_tab_seq_incr;
-static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
+static int
+db_table_tid_destructor(Binary *unused)
+{
+ return 1;
+}
+
+static ERTS_INLINE void
+make_btid(DbTable *tb)
+{
+ Binary *btid = erts_create_magic_indirection(db_table_tid_destructor);
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ erts_atomic_init_nob(tbref, (erts_aint_t) tb);
+ tb->common.btid = btid;
+ /*
+ * Table and magic indirection refer to each other,
+ * and the table is referred to once by being alive...
+ */
+ erts_refc_init(&tb->common.refc, 2);
+ erts_refc_inc(&btid->intern.refc, 1);
+}
+
+static ERTS_INLINE DbTable* btid2tab(Binary* btid)
+{
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ return (DbTable *) erts_atomic_read_nob(tbref);
+}
+
+static DbTable *
+tid2tab(Eterm tid)
+{
+ DbTable *tb;
+ Binary *btid;
+ erts_atomic_t *tbref;
+ if (!is_internal_magic_ref(tid))
+ return NULL;
+
+ btid = erts_magic_ref2bin(tid);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor)
+ return NULL;
+
+ tbref = erts_binary_to_magic_indirection(btid);
+ tb = (DbTable *) erts_atomic_read_nob(tbref);
+
+ ASSERT(!tb || tb->common.btid == btid);
+
+ return tb;
+}
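make_btid() and tid2tab() above implement table identifiers as magic references wrapping a single mutable word: a live tid resolves to its table through one relaxed atomic read, and tid_clear() later swaps NULL into that word so stale tids keep resolving safely to "no table" while the reference itself stays valid. A minimal C11 sketch of the indirection, with stand-in types for Binary/DbTable:

    #include <stdatomic.h>
    #include <stddef.h>

    struct table;                                /* opaque DbTable stand-in */
    struct btid { _Atomic(struct table *) tab; };

    /* cf. btid2tab(): a live lookup is one relaxed load */
    static struct table *btid_to_table(struct btid *b)
    {
        return atomic_load_explicit(&b->tab, memory_order_relaxed);
    }

    /* cf. tid_clear(): deletion swaps NULL in; the old value tells the
     * caller whether it must drop the "alive" reference */
    static struct table *btid_clear(struct btid *b)
    {
        return atomic_exchange_explicit(&b->tab, NULL, memory_order_relaxed);
    }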
+
+static ERTS_INLINE int
+is_table_alive(DbTable *tb)
+{
+ erts_atomic_t *tbref;
+ DbTable *rtb;
+
+ tbref = erts_binary_to_magic_indirection(tb->common.btid);
+ rtb = (DbTable *) erts_atomic_read_nob(tbref);
+
+ ASSERT(!rtb || rtb == tb);
+
+ return !!rtb;
+}
+
+static ERTS_INLINE int
+is_table_named(DbTable *tb)
+{
+ return tb->common.type & DB_NAMED_TABLE;
+}
+
+
+static ERTS_INLINE void
+tid_clear(Process *c_p, DbTable *tb)
+{
+ DbTable *rtb;
+ Binary *btid = tb->common.btid;
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
+ ASSERT(!rtb || tb == rtb);
+ if (rtb) {
+ table_dec_refc(tb, 1);
+ delete_sched_table(c_p, tb);
+ }
+}
+
+static ERTS_INLINE Eterm
+make_tid(Process *c_p, DbTable *tb)
+{
+ Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid);
+}
+
+Eterm
+erts_db_make_tid(Process *c_p, DbTableCommon *tb)
+{
+ return make_tid(c_p, (DbTable*)tb);
+}
+
+
/*
** The meta hash table of all NAMED ets tables
*/
-#ifdef ERTS_SMP
-# define META_NAME_TAB_LOCK_CNT 16
+# define META_NAME_TAB_LOCK_CNT 256
union {
- erts_smp_rwmtx_t lck;
- byte _cache_line_alignment[64];
+ erts_rwmtx_t lck;
+ byte align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
-#endif
static struct meta_name_tab_entry {
union {
Eterm name_atom;
@@ -156,13 +342,11 @@ static unsigned meta_name_tab_mask;
static ERTS_INLINE
struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
- erts_smp_rwmtx_t** lockp)
+ erts_rwmtx_t** lockp)
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
-#ifdef ERTS_SMP
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
-#endif
return bucket;
}
@@ -170,8 +354,7 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3, /* record write access */
- LCK_NONE=4
+ LCK_WRITE_REC=3 /* record write access */
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -181,11 +364,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */
-static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */
-static Eterm ms_delete_all;
-static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
- of all objects */
/*
** Forward decls, static functions
@@ -195,22 +373,21 @@ static void fix_table_locked(Process* p, DbTable* tb);
static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
-static void free_fixations_locked(DbTable *tb);
-
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab);
-static void print_table(int to, void *to_arg, int show, DbTable* tb);
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
+static SWord free_fixations_locked(Process* p, DbTable *tb);
+
+static void delete_all_objects_continue(Process* p, DbTable* tb);
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
+static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
-static BIF_RETTYPE ets_select1(Process* p, Eterm arg1);
-static BIF_RETTYPE ets_select2(Process* p, Eterm arg1, Eterm arg2);
-static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE ets_select1(Process* p, int bif_ix, Eterm arg1);
+static BIF_RETTYPE ets_select2(Process* p, DbTable*, Eterm tid, Eterm ms);
+static BIF_RETTYPE ets_select3(Process* p, DbTable*, Eterm tid, Eterm ms, Sint chunk_size);
/*
@@ -218,6 +395,7 @@ static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
*/
Export ets_select_delete_continue_exp;
Export ets_select_count_continue_exp;
+Export ets_select_replace_continue_exp;
Export ets_select_continue_exp;
/*
@@ -230,28 +408,19 @@ free_dbtable(void *vtb)
{
DbTable *tb = (DbTable *) vtb;
#ifdef HARDDEBUG
- if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
+ if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
- erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
+ erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
- erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
- tb->common.id);
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
-
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
-#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_destroy(&tb->common.rwlock);
- erts_smp_mtx_destroy(&tb->common.fixlock);
#endif
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
+
+ if (tb->common.btid)
+ erts_bin_release(tb->common.btid);
+
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
}
@@ -266,41 +435,184 @@ static void schedule_free_dbtable(DbTable* tb)
* Caller is *not* allowed to access the specialized part
* (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.refc, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
sizeof(DbTable));
}
-static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
- char *rwname, char* fixname)
+static ERTS_INLINE void
+save_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ DbTable *first;
+
+ ASSERT(esdp);
+ erts_atomic_inc_nob(&esdp->ets_tables.count);
+ erts_refc_inc(&tb->common.refc, 1);
+
+ first = esdp->ets_tables.clist;
+ if (!first) {
+ tb->common.all.next = tb->common.all.prev = tb;
+ esdp->ets_tables.clist = tb;
+ }
+ else {
+ tb->common.all.prev = first->common.all.prev;
+ tb->common.all.next = first;
+ tb->common.all.prev->common.all.next = tb;
+ first->common.all.prev = tb;
+ }
+}
+
+static ERTS_INLINE void
+remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
+{
+ ErtsEtsAllYieldData *eaydp;
+ ASSERT(esdp);
+ ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
+ == (Uint32) esdp->no);
+
+ ASSERT(erts_atomic_read_nob(&esdp->ets_tables.count) > 0);
+ erts_atomic_dec_nob(&esdp->ets_tables.count);
+
+ eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ if (eaydp->ongoing) {
+ /* ets:all() op process list from last to first... */
+ if (eaydp->tab == tb) {
+ if (eaydp->tab == esdp->ets_tables.clist)
+ eaydp->tab = NULL;
+ else
+ eaydp->tab = tb->common.all.prev;
+ }
+ }
+
+ if (tb->common.all.next == tb) {
+ ASSERT(tb->common.all.prev == tb);
+ ASSERT(esdp->ets_tables.clist == tb);
+ esdp->ets_tables.clist = NULL;
+ }
+ else {
+#ifdef DEBUG
+ DbTable *tmp = esdp->ets_tables.clist;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.all.next;
+ } while (tmp != esdp->ets_tables.clist);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.all.prev->common.all.next = tb->common.all.next;
+ tb->common.all.next->common.all.prev = tb->common.all.prev;
+
+ if (esdp->ets_tables.clist == tb)
+ esdp->ets_tables.clist = tb->common.all.next;
+
+ }
+
+ table_dec_refc(tb, 0);
+}
+
+static void
+scheduled_remove_sched_table(void *vtb)
+{
+ remove_sched_table(erts_get_scheduler_data(), (DbTable *) vtb);
+}
+
+static void
+delete_sched_table(Process *c_p, DbTable *tb)
{
-#ifdef ERTS_SMP
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Uint32 sid;
+
+ ASSERT(esdp);
+
+ ASSERT(tb->common.btid);
+ sid = erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ if (sid == (Uint32) esdp->no)
+ remove_sched_table(esdp, tb);
+ else
+ erts_schedule_misc_aux_work((int) sid, scheduled_remove_sched_table, tb);
+}
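delete_sched_table() above exploits the scheduler id that was recorded in the tid's reference numbers at creation: only the scheduler that owns ets_tables.clist may unlink the table, so callers on other schedulers post the unlink as misc aux work instead of touching the list. A sketch of that dispatch shape, with assumed stand-ins for the scheduler type and for erts_schedule_misc_aux_work():

    typedef void (*work_fn)(void *);

    struct sched { int no; };                        /* scheduler id */

    /* assumed stand-in for erts_schedule_misc_aux_work() */
    extern void post_aux_work(int sched_no, work_fn fn, void *arg);

    static void remove_on_owner(struct sched *me, int owner_no,
                                work_fn unlink_fn, void *tab)
    {
        if (owner_no == me->no)
            unlink_fn(tab);                          /* we own the list */
        else
            post_aux_work(owner_no, unlink_fn, tab); /* defer to the owner */
    }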
+
+static ERTS_INLINE void
+save_owned_table(Process *c_p, DbTable *tb)
+{
+ DbTable *first;
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+
+ erts_refc_inc(&tb->common.refc, 1);
+
+ if (!first) {
+ tb->common.owned.next = tb->common.owned.prev = tb;
+ erts_psd_set(c_p, ERTS_PSD_ETS_OWNED_TABLES, tb);
+ }
+ else {
+ tb->common.owned.prev = first->common.owned.prev;
+ tb->common.owned.next = first;
+ tb->common.owned.prev->common.owned.next = tb;
+ first->common.owned.prev = tb;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+}
+
+static ERTS_INLINE void
+delete_owned_table(Process *p, DbTable *tb)
+{
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (tb->common.owned.next == tb) {
+ DbTable* old;
+ ASSERT(tb->common.owned.prev == tb);
+ old = erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, NULL);
+ ASSERT(old == tb); (void)old;
+ }
+ else {
+ DbTable *first = (DbTable*) erts_psd_get(p, ERTS_PSD_ETS_OWNED_TABLES);
+#ifdef DEBUG
+ DbTable *tmp = first;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.owned.next;
+ } while (tmp != first);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.owned.prev->common.owned.next = tb->common.owned.next;
+ tb->common.owned.next->common.owned.prev = tb->common.owned.prev;
+
+ if (tb == first)
+ erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next);
+ }
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ table_dec_refc(tb, 1);
+}
+
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
+{
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
if (use_frequent_read_lock)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
-#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
- rwname, tb->common.the_name);
- erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
+ erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
-#endif
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
-#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
tb->common.is_thread_safe = 1;
} else {
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
ASSERT(!tb->common.is_thread_safe);
}
}
@@ -309,14 +621,13 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
}
ASSERT(tb->common.is_thread_safe);
}
-#endif
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
@@ -326,18 +637,15 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* DbTable structure. That is, ONLY the SMP case is allowed
* to follow the tb pointer!
*/
-#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
-
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
tb->common.is_thread_safe = 0;
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
}
else {
ASSERT(!tb->common.is_thread_safe);
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
else {
@@ -345,27 +653,39 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
-#endif
}
-
-static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind)
+static ERTS_INLINE int db_is_exclusive(DbTable* tb, db_lock_kind_t kind)
{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
- /* As long as we only lock for READ we don't have to lock at all. */
+ return kind != LCK_READ && tb->common.is_thread_safe;
}
-static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
+static DbTable* handle_lacking_permission(Process* p, DbTable* tb,
+ db_lock_kind_t kind,
+ Uint* freason_p)
{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
+ if (tb->common.status & DB_BUSY) {
+ if (!db_is_exclusive(tb, kind)) {
+ db_unlock(tb, kind);
+ db_lock(tb, LCK_WRITE);
+ }
+ delete_all_objects_continue(p, tb);
+ db_unlock(tb, LCK_WRITE);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else if (p->common.id != tb->common.owner) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = BADARG;
+ }
+ return tb;
}
static ERTS_INLINE
@@ -373,10 +693,10 @@ DbTable* db_get_table_aux(Process *p,
Eterm id,
int what,
db_lock_kind_t kind,
- int meta_already_locked)
+ int meta_already_locked,
+ Uint* freason_p)
{
- DbTable *tb = NULL;
- erts_smp_rwmtx_t *mtl = NULL;
+ DbTable *tb;
/*
* IMPORTANT: Only scheduler threads are allowed
@@ -385,32 +705,16 @@ DbTable* db_get_table_aux(Process *p,
*/
ASSERT(erts_get_scheduler_data());
- if (is_small(id)) {
- Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- if (!meta_already_locked) {
- mtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rlock(mtl);
- }
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- else {
- erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
- || erts_lc_rwmtx_is_rwlocked(test_mtl));
- }
-#endif
- if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
- tb = meta_main_tab[slot].u.tb;
- }
- else if (is_atom(id)) {
+ if (is_atom(id)) {
+ erts_rwmtx_t *mtl;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
- erts_smp_rwmtx_rlock(mtl);
+ erts_rwmtx_rlock(mtl);
else{
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
|| erts_lc_rwmtx_is_rwlocked(mtl));
- mtl = NULL;
}
-
+ tb = NULL;
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id)
@@ -427,18 +731,29 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ if (!meta_already_locked)
+ erts_rwmtx_runlock(mtl);
}
+ else
+ tb = tid2tab(id);
+
if (tb) {
db_lock(tb, kind);
- if (tb->common.id != id
- || ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner)) {
- db_unlock(tb, kind);
- tb = NULL;
- }
+#ifdef ETS_DBG_FORCE_TRAP
+ if (erts_atomic_read_nob(&tb->common.dbg_force_trap) &&
+ erts_atomic_add_read_nob(&tb->common.dbg_force_trap, 2) & 2) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else
+#endif
+ if (ERTS_UNLIKELY(!(tb->common.status & what)))
+ tb = handle_lacking_permission(p, tb, kind, freason_p);
}
- if (mtl)
- erts_smp_rwmtx_runlock(mtl);
+ else
+ *freason_p = BADARG;
+
return tb;
}
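The ETS_DBG_FORCE_TRAP branch above is a test-build aid: when the per-table counter is seeded with an odd value, adding 2 on each lookup and testing bit 1 makes successive lookups alternate between forced traps and normal execution, exercising every trap/restart path deterministically. A tiny stand-alone demonstration of the toggle:

    #include <stdio.h>

    int main(void)
    {
        long ctr = 1;               /* odd seed, cf. erts_ets_dbg_force_trap */
        int i;
        for (i = 0; i < 6; i++) {
            ctr += 2;               /* cf. erts_atomic_add_read_nob(.., 2) */
            printf("lookup %d: %s\n", i, (ctr & 2) ? "trap" : "proceed");
        }
        return 0;
    }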
@@ -446,32 +761,21 @@ static ERTS_INLINE
DbTable* db_get_table(Process *p,
Eterm id,
int what,
- db_lock_kind_t kind)
-{
- return db_get_table_aux(p, id, what, kind, 0);
-}
-
-/* Requires meta_main_tab_locks[slot] locked.
-*/
-static ERTS_INLINE void free_slot(int slot)
+ db_lock_kind_t kind,
+ Uint* freason_p)
{
- ASSERT(!IS_SLOT_FREE(slot));
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free);
- meta_main_tab_first_free = slot;
- meta_main_tab_cnt--;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
+ return db_get_table_aux(p, id, what, kind, 0, freason_p);
}
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
+ erts_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
if (!have_lock)
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -519,26 +823,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
- Eterm name_atom = tb->common.id;
+ erts_rwmtx_t* rwlock;
+ Eterm name_atom = tb->common.the_name;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
-#ifdef ERTS_SMP
- if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ ASSERT(is_table_named(tb));
+ if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
-#endif
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
if (bucket->pu.tb == NULL) {
goto done;
@@ -591,7 +894,7 @@ static int remove_named_tab(DbTable *tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -600,11 +903,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_refc_inc(&tb->common.ref, 1);
+ erts_refc_inc(&tb->common.fix_count, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_refc_dectest(&tb->common.ref, 0) == 0) {
+ if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -627,9 +930,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, kind, BIF_ets_safe_fixtable_2);
if (BIF_ARG_2 == am_true) {
fix_table_locked(BIF_P, tb);
@@ -659,11 +960,7 @@ BIF_RETTYPE ets_first_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_first_1);
cret = tb->common.meth->db_first(BIF_P, tb, &ret);
@@ -686,11 +983,7 @@ BIF_RETTYPE ets_next_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_next_2);
cret = tb->common.meth->db_next(BIF_P, tb, BIF_ARG_2, &ret);
@@ -713,11 +1006,7 @@ BIF_RETTYPE ets_last_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_last_1);
cret = tb->common.meth->db_last(BIF_P, tb, &ret);
@@ -740,11 +1029,7 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_prev_2);
cret = tb->common.meth->db_prev(BIF_P,tb,BIF_ARG_2,&ret);
@@ -762,21 +1047,15 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RETTYPE ets_take_2(BIF_ALIST_2)
{
DbTable* tb;
-#ifdef DEBUG
int cret;
-#endif
Eterm ret;
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
-#ifdef DEBUG
- cret =
-#endif
- tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
- ASSERT(cret == DB_ERROR_NONE);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_take_2);
+
+ cret = tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
db_unlock(tb, LCK_WRITE_REC);
BIF_RET(ret);
}
@@ -794,9 +1073,8 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DeclareTmpHeap(cell,2,BIF_P);
DbUpdateHandle handle;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
+
UseTmpHeap(2,BIF_P);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
@@ -867,9 +1145,9 @@ bail_out:
}
static BIF_RETTYPE
-do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
+do_update_counter(Process *p, DbTable* tb,
+ Eterm arg2, Eterm arg3, Eterm arg4)
{
- DbTable* tb;
int cret = DB_ERROR_BADITEM;
Eterm upop_list;
int list_size;
@@ -885,10 +1163,6 @@ do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
Eterm* hstart;
Eterm* hend;
- if ((tb = db_get_table(p, arg1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
-
UseTmpHeap(5, p);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
@@ -1062,7 +1336,11 @@ bail_out:
*/
BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
{
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
+ DbTable* tb;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_3);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
}
/*
@@ -1074,10 +1352,14 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_update_counter_4(BIF_ALIST_4)
{
+ DbTable* tb;
+
if (is_not_tuple(BIF_ARG_4)) {
BIF_ERROR(BIF_P, BADARG);
}
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_4);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
@@ -1098,9 +1380,8 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
? LCK_WRITE : LCK_WRITE_REC);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1166,11 +1447,9 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* More than one object, use LCK_WRITE to keep atomicity */
kind = LCK_WRITE;
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
- if (tb == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- meth = tb->common.meth;
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
+ meth = tb->common.meth;
for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
if (is_not_tuple(CAR(list_val(lst)))
|| (arityval(*tuple_val(CAR(list_val(lst))))
@@ -1205,9 +1484,8 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* Only one object (or NIL)
*/
kind = LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1244,7 +1522,9 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
- erts_smp_rwmtx_t *lck1, *lck2;
+ Eterm old_name;
+ erts_rwmtx_t *lck1, *lck2;
+ Uint freason;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1260,59 +1540,69 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
(void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
- if (is_small(BIF_ARG_1)) {
- Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
- lck2 = get_meta_main_tab_lock(slot);
- }
- else if (is_atom(BIF_ARG_1)) {
- (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (is_atom(BIF_ARG_1)) {
+ old_name = BIF_ARG_1;
+ named_tab:
+ (void) meta_name_tab_bucket(old_name, &lck2);
if (lck1 == lck2)
lck2 = NULL;
else if (lck1 > lck2) {
- erts_smp_rwmtx_t *tmp = lck1;
+ erts_rwmtx_t *tmp = lck1;
lck1 = lck2;
lck2 = tmp;
}
}
else {
- BIF_ERROR(BIF_P, BADARG);
+ tb = tid2tab(BIF_ARG_1);
+ if (!tb)
+ BIF_ERROR(BIF_P, BADARG);
+ else {
+ if (is_table_named(tb)) {
+ old_name = tb->common.the_name;
+ goto named_tab;
+ }
+ lck2 = NULL;
+ }
}
- erts_smp_rwmtx_rwlock(lck1);
+ erts_rwmtx_rwlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwlock(lck2);
+ erts_rwmtx_rwlock(lck2);
- tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1, &freason);
if (!tb)
- goto badarg;
-
- if (is_not_atom(tb->common.id)) { /* Not a named table */
- tb->common.the_name = BIF_ARG_2;
- goto done;
- }
+ goto fail;
- if (!insert_named_tab(BIF_ARG_2, tb, 1))
- goto badarg;
+ if (is_table_named(tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
+ goto badarg;
- if (!remove_named_tab(tb, 1))
- erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.id);
-
- tb->common.id = tb->common.the_name = BIF_ARG_2;
+ if (!remove_named_tab(tb, 1))
+ erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.the_name);
+ ret = BIF_ARG_2;
+ }
+ else { /* Not a named table */
+ ret = BIF_ARG_1;
+ }
+ tb->common.the_name = BIF_ARG_2;
- done:
- ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
+ erts_rwmtx_rwunlock(lck2);
BIF_RET(ret);
- badarg:
+
+badarg:
+ freason = BADARG;
+
+fail:
if (tb)
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
- BIF_ERROR(BIF_P, BADARG);
+ erts_rwmtx_rwunlock(lck2);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_rename_2, NULL);
}
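ets_rename_2 above may need two meta-name bucket locks at once; it stays deadlock-free by collapsing equal locks to one and ordering the remaining pair by address, the usual total-order discipline for multi-lock acquisition. A sketch of the idiom with plain pthread rwlocks:

    #include <pthread.h>

    static void lock_pair(pthread_rwlock_t *a, pthread_rwlock_t *b)
    {
        if (a == b)
            b = NULL;                    /* same bucket: take it once */
        else if (a > b) {
            pthread_rwlock_t *tmp = a;   /* always lock lower address first */
            a = b;
            b = tmp;
        }
        pthread_rwlock_wrlock(a);
        if (b)
            pthread_rwlock_wrlock(b);
    }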
@@ -1324,7 +1614,6 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable* tb = NULL;
- int slot;
Eterm list;
Eterm val;
Eterm ret;
@@ -1333,15 +1622,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Uint32 status;
Sint keypos;
int is_named, is_compressed;
-#ifdef ERTS_SMP
int is_fine_locked, frequent_read;
-#endif
-#ifdef DEBUG
int cret;
-#endif
- DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
- erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1350,13 +1633,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- status = DB_NORMAL | DB_SET | DB_PROTECTED;
+ status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
-#ifdef ERTS_SMP
is_fine_locked = 0;
frequent_read = 0;
-#endif
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
@@ -1384,30 +1665,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
is_fine_locked = 1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_read_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
frequent_read = 1;
} else if (tp[2] == am_false) {
frequent_read = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_heir && tp[2] == am_none) {
@@ -1433,6 +1702,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
else if (val == am_named_table) {
is_named = 1;
+ status |= DB_NAMED_TABLE;
}
else if (val == am_compressed) {
is_compressed = 1;
@@ -1448,11 +1718,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
if (IS_HASH_TABLE(status)) {
meth = &db_hash;
-#ifdef ERTS_SMP
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
}
-#endif
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -1461,10 +1729,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-#ifdef ERTS_SMP
if (frequent_read && !(status & DB_PRIVATE))
status |= DB_FREQ_READ;
-#endif
/* we create the table outside any table lock
* and take the unusual cost of destroying the table if it
@@ -1473,89 +1739,54 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable init_tb;
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
+ erts_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- erts_smp_atomic_init_nob(&tb->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
+ erts_atomic_init_nob(&tb->common.memory_size,
+ erts_atomic_read_nob(&init_tb.common.memory_size));
}
tb->common.meth = meth;
tb->common.the_name = BIF_ARG_1;
tb->common.status = status;
-#ifdef ERTS_SMP
- tb->common.type = status & ERTS_ETS_TABLE_TYPES;
+ tb->common.type = status;
/* Note, 'type' is *read only* from now on... */
-#endif
- erts_refc_init(&tb->common.ref, 0);
- db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
- "db_tab", "db_tab_fix");
+ erts_refc_init(&tb->common.fix_count, 0);
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
- erts_smp_atomic_init_nob(&tb->common.nitems, 0);
+ erts_atomic_init_nob(&tb->common.nitems, 0);
- tb->common.fixations = NULL;
+ tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
-
-#ifdef DEBUG
- cret =
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_atomic_init_nob(&tb->common.dbg_force_trap, erts_ets_dbg_force_trap);
#endif
- meth->db_create(BIF_P, tb);
- ASSERT(cret == DB_ERROR_NONE);
-
- erts_smp_spin_lock(&meta_main_tab_main_lock);
-
- if (meta_main_tab_cnt >= db_max_tabs) {
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
- erts_send_error_to_logger_str(BIF_P->group_leader,
- "** Too many db tables **\n");
- free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
- free_dbtable((void *) tb);
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
- slot = meta_main_tab_first_free;
- ASSERT(slot>=0 && slot<db_max_tabs);
- meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
- meta_main_tab_cnt++;
- if (slot >= meta_main_tab_top) {
- ASSERT(slot == meta_main_tab_top);
- meta_main_tab_top = slot + 1;
- }
+ cret = meth->db_create(BIF_P, tb);
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
- if (is_named) {
- ret = BIF_ARG_1;
- }
- else {
- ret = make_small(slot | meta_main_tab_seq_cnt);
- meta_main_tab_seq_cnt += meta_main_tab_seq_incr;
- ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot);
- }
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
+ make_btid(tb);
- tb->common.id = ret;
- tb->common.slot = slot; /* store slot for erase */
+ if (is_named)
+ ret = BIF_ARG_1;
+ else
+ ret = make_tid(BIF_P, tb);
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- meta_main_tab[slot].u.tb = tb;
- ASSERT(IS_SLOT_ALIVE(slot));
- erts_smp_rwmtx_rwunlock(mmtl);
+ save_sched_table(BIF_P, tb);
+ save_owned_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- free_slot(slot);
- erts_smp_rwmtx_rwunlock(mmtl);
+ tid_clear(BIF_P, tb);
+ delete_owned_table(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
- schedule_free_dbtable(tb);
+ tb->common.meth->db_free_empty_table(tb);
db_unlock(tb,LCK_WRITE);
+ table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
@@ -1566,27 +1797,37 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
- erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
#endif
- UseTmpHeap(3,BIF_P);
+ BIF_RET(ret);
+}
+
+/*
+** Retrieves the tid() of a named ets table.
+*/
+BIF_RETTYPE ets_whereis_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+ Eterm res;
+ Uint freason;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(slot)),
- 0) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Could not update ets metadata.");
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG)
+ BIF_RET(am_undefined);
+ else {
+ /* ToDo: Could we avoid this trap? */
+ return db_bif_fail(BIF_P, freason, BIF_ets_whereis_1, NULL);
+ }
}
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(3,BIF_P);
+ res = make_tid(BIF_P, tb);
+ db_unlock(tb, LCK_READ);
- BIF_RET(ret);
+ BIF_RET(res);
}
/*
@@ -1600,9 +1841,7 @@ BIF_RETTYPE ets_lookup_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_2);
cret = tb->common.meth->db_get(BIF_P, tb, BIF_ARG_2, &ret);
@@ -1630,9 +1869,7 @@ BIF_RETTYPE ets_member_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_member_2);
cret = tb->common.meth->db_member(tb, BIF_ARG_2, &ret);
@@ -1663,9 +1900,7 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_element_3);
if (is_not_small(BIF_ARG_3) || ((index = signed_val(BIF_ARG_3)) < 1)) {
db_unlock(tb, LCK_READ);
@@ -1690,9 +1925,9 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
- int trap;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
DbTable* tb;
- erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1703,9 +1938,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_delete_1);
/*
* Clear all access bits to prevent any ets operation to access the
@@ -1715,7 +1948,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status |= DB_DELETE;
if (tb->common.owner != BIF_P->common.id) {
- DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
* The table is being deleted by a process other than its owner.
@@ -1723,50 +1955,33 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* current process will be killed (e.g. by an EXIT signal), we will
* now transfer the ownership to the current process.
*/
- UseTmpHeap(3,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
- BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->common.id;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(3,BIF_P);
- }
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
- db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(mmtl);
- db_lock(tb, LCK_WRITE);
+ Process *rp = erts_proc_lookup_raw(tb->common.owner);
+ /*
+ * Process 'rp' might be exiting, but our table lock prevents it
+ * from terminating as it cannot complete erts_db_process_exiting().
+ */
+ ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)));
+
+ delete_owned_table(rp, tb);
+ BIF_P->flags |= F_USING_DB;
+ tb->common.owner = BIF_P->common.id;
+ save_owned_table(BIF_P, tb);
}
-#endif
- /* We must keep the slot, to be found by db_proc_dead() if process dies */
- MARK_SLOT_DEAD(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
- if (is_atom(tb->common.id))
+
+ if (is_table_named(tb))
remove_named_tab(tb, 0);
/* disable inheritance */
free_heir_data(tb);
tb->common.heir = am_none;
- free_fixations_locked(tb);
-
- trap = free_table_cont(BIF_P, tb, 1, 1);
+ reds -= free_fixations_locked(BIF_P, tb);
+ tid_clear(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (trap) {
+
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
@@ -1776,9 +1991,11 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
Eterm *hp = HAlloc(BIF_P, 2);
hp[0] = make_pos_bignum_header(1);
hp[1] = (Eterm) tb;
+ BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
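The trap at the end of ets_delete_1 smuggles the DbTable pointer through the trap as a one-digit heap bignum: a bignum's digit words are opaque to the garbage collector, so the pointer is carried intact inside an ordinary Erlang term. A stand-alone sketch of the round trip (the header encoding here is illustrative, not the emulator's tag layout):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t Term;

    static void pack(Term hp[2], void *ptr)
    {
        hp[0] = 0x1;             /* stand-in for make_pos_bignum_header(1) */
        hp[1] = (Term) ptr;      /* the single "digit" holds the pointer */
    }

    static void *unpack(const Term hp[2])
    {
        return (void *) hp[1];
    }

    int main(void)
    {
        Term heap[2];
        int dummy;
        pack(heap, &dummy);
        printf("round-trip ok: %d\n", unpack(heap) == (void *) &dummy);
        return 0;
    }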
@@ -1790,10 +2007,10 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
{
Process* to_proc = NULL;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,BIF_P);
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
+ Uint freason;
if (!is_internal_pid(to_pid)) {
goto badarg;
@@ -1803,43 +2020,35 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg;
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
- goto badarg;
- }
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, &freason)) == NULL)
+ goto fail;
+ if (tb->common.owner != BIF_P->common.id)
+ goto badarg;
+
from_pid = tb->common.owner;
if (to_pid == from_pid) {
goto badarg; /* or should we be idempotent? return false maybe */
}
- UseTmpHeap(5,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
+ delete_owned_table(BIF_P, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ save_owned_table(to_proc, tb);
db_unlock(tb,LCK_WRITE);
- erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER,
- tb->common.id,
- from_pid,
- BIF_ARG_3),
- 0);
- erts_smp_proc_unlock(to_proc, to_locks);
+ send_ets_transfer_message(BIF_P, to_proc, &to_locks,
+ tb, BIF_ARG_3);
+ erts_proc_unlock(to_proc, to_locks);
- UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
badarg:
- if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks);
+ freason = BADARG;
+fail:
+ if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks);
if (tb != NULL) db_unlock(tb, LCK_WRITE);
- BIF_ERROR(BIF_P, BADARG);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_give_away_3, NULL);
}
BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
@@ -1890,11 +2099,13 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
}
}
- if (tail != NIL
- || (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
+ if (tail != NIL)
+ goto badarg;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_setopts_2);
+
+ if (tb->common.owner != BIF_P->common.id)
goto badarg;
- }
if (heir_data != THE_NON_VALUE) {
free_heir_data(tb);
@@ -1918,23 +2129,84 @@ badarg:
}
/*
-** BIF to erase a whole table and release all memory it holds
-*/
-BIF_RETTYPE ets_delete_all_objects_1(BIF_ALIST_1)
+ * Common for delete_all_objects and select_delete(DeleteAll).
+ */
+BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
+ Eterm nitems;
DbTable* tb;
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_internal_delete_all_2);
+
+ if (BIF_ARG_2 == am_undefined) {
+ nitems = erts_make_integer(erts_atomic_read_nob(&tb->common.nitems),
+ BIF_P);
+
+ reds = tb->common.meth->db_delete_all_objects(BIF_P, tb, reds);
+
+ ASSERT(!(tb->common.status & DB_BUSY));
+
+ if (reds < 0) {
+ /*
+         * Oh boy, we need to trap AND we need to be atomic.
+         * Solved by cooperative trapping where every process trying to
+         * access this table (including this process) will "fail" to look
+         * up the table and instead pitch in deleting objects
+         * (in delete_all_objects_continue, sketched below) and then
+         * trap to self.
+ */
+ ASSERT((tb->common.status & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC))
+ ==
+ (tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC)));
+ tb->common.status &= ~(DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status |= DB_BUSY;
+ db_unlock(tb, LCK_WRITE);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_ets_internal_delete_all_2], BIF_P,
+ BIF_ARG_1, nitems);
+ }
+ else {
+ /* Done, no trapping needed */
+ BUMP_REDS(BIF_P, (initial_reds - reds));
+ }
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
+ }
+ else {
+ /*
+ * The table lookup succeeded and second argument is nitems
+ * and not 'undefined', which means we have trapped at least once
+ * and are now done.
+ */
+ nitems = BIF_ARG_2;
+ }
db_unlock(tb, LCK_WRITE);
+ BIF_RET(nitems);
+}
- BIF_RET(am_true);
+static void delete_all_objects_continue(Process* p, DbTable* tb)
+{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(p);
+ SWord reds = initial_reds;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
+ return;
+
+ reds = tb->common.meth->db_delete_all_objects(p, tb, reds);
+
+ if (reds < 0) {
+ BUMP_ALL_REDS(p);
+ }
+ else {
+ tb->common.status |= tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status &= ~DB_BUSY;
+ BUMP_REDS(p, (initial_reds - reds));
+ }
}
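/*
 * A minimal sketch of the status-bit dance used by the cooperative
 * delete-all above (assumed flag values, not taken from this patch):
 * 'type' records the access bits at table creation, so they can be
 * cleared while DB_BUSY is set and restored once the work is done.
 */
enum {
    SK_PRIVATE   = 1 << 0,
    SK_PROTECTED = 1 << 1,
    SK_PUBLIC    = 1 << 2,
    SK_BUSY      = 1 << 3
};
#define SK_ACCESS_BITS (SK_PRIVATE | SK_PROTECTED | SK_PUBLIC)

struct sk_tab { int status; int type; /* read-only after creation */ };

static void sk_begin_busy(struct sk_tab *t)
{
    t->status &= ~SK_ACCESS_BITS;  /* every lookup now "fails"... */
    t->status |= SK_BUSY;          /* ...and the caller is diverted to help */
}

static void sk_end_busy(struct sk_tab *t)
{
    t->status |= t->type & SK_ACCESS_BITS;  /* restore the access bits */
    t->status &= ~SK_BUSY;
}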
/*
@@ -1950,9 +2222,7 @@ BIF_RETTYPE ets_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_2);
cret = tb->common.meth->db_erase(tb,BIF_ARG_2,&ret);
@@ -1979,9 +2249,8 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_object_2);
+
if (is_not_tuple(BIF_ARG_2) ||
(arityval(*tuple_val(BIF_ARG_2)) < tb->common.keypos)) {
db_unlock(tb, LCK_WRITE_REC);
@@ -2004,7 +2273,7 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
/*
** This is for trapping, cannot be called directly.
*/
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1)
{
Process *p = BIF_P;
Eterm a1 = BIF_ARG_1;
@@ -2014,15 +2283,14 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_WRITE_REC;
-
+
CHECK_TABLES();
ASSERT(is_tuple(a1));
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
- BIF_ERROR(p,BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_delete_continue_exp);
cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret);
@@ -2046,7 +2314,10 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
}
-BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
+/*
+ * ets:select_delete/2 without special case for "delete-all".
+ */
+BIF_RETTYPE ets_internal_select_delete_2(BIF_ALIST_2)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2056,25 +2327,13 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if(eq(BIF_ARG_2, ms_delete_all)) {
- int nitems;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- nitems = erts_smp_atomic_read_nob(&tb->common.nitems);
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
- db_unlock(tb, LCK_WRITE);
- BIF_RET(erts_make_integer(nitems,BIF_P));
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_internal_select_delete_2);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -2101,48 +2360,254 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
return result;
}
-/*
-** Return a list of tables on this node
-*/
-BIF_RETTYPE ets_all_0(BIF_ALIST_0)
+/*
+ * ets:all/0
+ *
+ * ets:all() calls ets:internal_request_all/0 which
+ * requests information about all tables from
+ * each scheduler thread. Each scheduler replies
+ * to the calling process with information about
+ * existing tables created on that specific scheduler.
+ */
+
+struct ErtsEtsAllReq_ {
+ erts_atomic32_t refc;
+ Process *proc;
+ ErtsOIRefStorage ref;
+ ErtsEtsAllReqList list[1]; /* one per scheduler */
+};
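/*
 * A stand-alone sketch (C11 atomics, hypothetical names) of the 'refc'
 * protocol in the struct above: the request starts with one reference
 * per scheduler, each scheduler drops its reference after replying,
 * and the last one to finish frees the request.
 */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_int refc; } sk_req;

static sk_req *sk_req_new(int nschedulers)
{
    sk_req *r = malloc(sizeof *r);
    atomic_init(&r->refc, nschedulers);
    return r;
}

/* Called exactly once by each scheduler when its reply has been sent. */
static void sk_req_done(sk_req *r)
{
    if (atomic_fetch_sub(&r->refc, 1) == 1)
        free(r);  /* we held the last reference */
}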
+
+#define ERTS_ETS_ALL_REQ_SIZE \
+ (sizeof(ErtsEtsAllReq) \
+ + (sizeof(ErtsEtsAllReqList) \
+ * (erts_no_schedulers - 1)))
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllData;
+
+/* Tables handled before yielding */
+#define ERTS_ETS_ALL_TB_YCNT 200
+/*
+ * Minimum remaining yield count required before starting
+ * an operation that itself may need to yield.
+ */
+#define ERTS_ETS_ALL_TB_YCNT_START 10
+
+#ifdef DEBUG
+/* Test yielding... */
+#undef ERTS_ETS_ALL_TB_YCNT
+#undef ERTS_ETS_ALL_TB_YCNT_START
+#define ERTS_ETS_ALL_TB_YCNT 10
+#define ERTS_ETS_ALL_TB_YCNT_START 1
+#endif
+
+static int
+ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
+ ErlHeapFragment **hfragpp, DbTable **tablepp,
+ int *yield_count_p)
{
- DbTable* tb;
- Eterm previous;
- int i;
- Eterm* hp;
- Eterm* hendp;
- int t_tabs_cnt;
- int t_top;
-
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- t_tabs_cnt = meta_main_tab_cnt;
- t_top = meta_main_tab_top;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
-
- previous = NIL;
- for(i = 0; i < t_top; i++) {
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(i)) {
- if (hp == hendp) {
- /* Racing table creator, grab some more heap space */
- t_tabs_cnt = 10;
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
- }
- tb = meta_main_tab[i].u.tb;
- previous = CONS(hp, tb->common.id, previous);
- hp += 2;
- }
- erts_smp_rwmtx_runlock(mmtl);
+ ErtsEtsAllReq *reqp = *reqpp;
+ ErlHeapFragment *hfragp = *hfragpp;
+ int ycount = *yield_count_p;
+ DbTable *tb, *first;
+ Uint sz;
+ Eterm list, msg, ref, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ /*
+ * - save_sched_table() inserts at end of circular list.
+ *
+ * - This function scans from the end so we know that
+     *   the number of tables to scan won't grow even if we
+     *   yield.
+ *
+ * - remove_sched_table() updates the table we yielded
+ * on if it removes it.
+ */
+
+ if (hfragp) {
+ /* Restart of a yielded operation... */
+ ASSERT(hfragp->used_size < hfragp->alloc_size);
+ ohp = &hfragp->off_heap;
+ hp = &hfragp->mem[hfragp->used_size];
+ list = *hp;
+ hfragp->used_size = hfragp->alloc_size;
+ first = esdp->ets_tables.clist;
+ tb = *tablepp;
+ }
+ else {
+ /* A new operation... */
+ ASSERT(!*tablepp);
+
+ /* Max heap size needed... */
+ sz = erts_atomic_read_nob(&esdp->ets_tables.count);
+ sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
+ sz += 3 + ERTS_REF_THING_SIZE;
+ hfragp = new_message_buffer(sz);
+
+ hp = &hfragp->mem[0];
+ ohp = &hfragp->off_heap;
+ list = NIL;
+ first = esdp->ets_tables.clist;
+ tb = first ? first->common.all.prev : NULL;
+ }
+
+ if (tb) {
+ while (1) {
+ if (is_table_alive(tb)) {
+ Eterm tid;
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ list = CONS(hp, tid, list);
+ hp += 2;
+ }
+
+ if (tb == first)
+ break;
+
+ tb = tb->common.all.prev;
+
+ if (--ycount <= 0) {
+ sz = hp - &hfragp->mem[0];
+ ASSERT(hfragp->alloc_size > sz + 1);
+ *hp = list;
+ hfragp->used_size = sz;
+ *hfragpp = hfragp;
+ *reqpp = reqp;
+ *tablepp = tb;
+ *yield_count_p = 0;
+ return 1; /* Yield! */
+ }
+ }
+ }
+
+ ref = erts_oiref_storage_make_ref(&reqp->ref, &hp);
+ msg = TUPLE2(hp, ref, list);
+ hp += 3;
+
+ sz = hp - &hfragp->mem[0];
+ ASSERT(sz <= hfragp->alloc_size);
+
+ hfragp = erts_resize_message_buffer(hfragp, sz, &msg, 1);
+
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = hfragp;
+
+ erts_queue_message(reqp->proc, 0, mp, msg, am_system);
+
+ erts_proc_dec_refc(reqp->proc);
+
+ if (erts_atomic32_dec_read_nob(&reqp->refc) == 0)
+ erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp);
+
+ *reqpp = NULL;
+ *hfragpp = NULL;
+ *tablepp = NULL;
+ *yield_count_p = ycount;
+
+ return 0;
+}
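/*
 * The yield/restart trick in ets_all_reply above, reduced to a sketch
 * (hypothetical names, not part of the change): the next free cell of
 * the heap fragment temporarily stores the head of the partial list,
 * and used_size marks the cell so the scan can resume there.
 */
typedef struct {
    long *mem;
    unsigned long used_size, alloc_size;
} sk_frag;

static void sk_park(sk_frag *f, long *hp, long list_head)
{
    *hp = list_head;                   /* stash the partial list's head */
    f->used_size = hp - f->mem;        /* remember where we stopped */
}

static long sk_resume(sk_frag *f, long **hpp)
{
    long head = f->mem[f->used_size];  /* read the stashed head back */
    *hpp = &f->mem[f->used_size];      /* keep building from that cell */
    f->used_size = f->alloc_size;      /* fragment now counts as full */
    return head;
}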
+
+int
+erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
+ ErtsEtsAllYieldData *eaydp)
+{
+ int ix = (int) esdp->no - 1;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+
+ while (1) {
+ if (!eaydp->ongoing) {
+ ErtsEtsAllReq *ongoing;
+
+ if (!eaydp->queue)
+ return 0; /* All work completed! */
+
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START &&
+ yc > erts_atomic_read_nob(&esdp->ets_tables.count))
+ return 1; /* Yield! */
+
+ eaydp->ongoing = ongoing = eaydp->queue;
+ if (ongoing->list[ix].next == ongoing)
+ eaydp->queue = NULL;
+ else {
+ ongoing->list[ix].next->list[ix].prev = ongoing->list[ix].prev;
+ ongoing->list[ix].prev->list[ix].next = ongoing->list[ix].next;
+ eaydp->queue = ongoing->list[ix].next;
+ }
+ ASSERT(!eaydp->hfrag);
+ ASSERT(!eaydp->tab);
+ }
+
+ if (ets_all_reply(esdp, &eaydp->ongoing, &eaydp->hfrag, &eaydp->tab, &yc))
+ return 1; /* Yield! */
}
- HRelease(BIF_P, hendp, hp);
- BIF_RET(previous);
}
+static void
+handle_ets_all_request(void *vreq)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsEtsAllYieldData *eayp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ ErtsEtsAllReq *req = (ErtsEtsAllReq *) vreq;
+
+ if (!eayp->ongoing && !eayp->queue) {
+ /* No ets:all() operations ongoing... */
+ ErlHeapFragment *hf = NULL;
+ DbTable *tb = NULL;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+ if (ets_all_reply(esdp, &req, &hf, &tb, &yc)) {
+ /* Yielded... */
+ ASSERT(hf);
+ eayp->ongoing = req;
+ eayp->hfrag = hf;
+ eayp->tab = tb;
+ erts_notify_new_aux_yield_work(esdp);
+ }
+ }
+ else {
+ /* Ongoing ets:all() operations; queue up this request... */
+ int ix = (int) esdp->no - 1;
+ if (!eayp->queue) {
+ req->list[ix].next = req;
+ req->list[ix].prev = req;
+ eayp->queue = req;
+ }
+ else {
+ req->list[ix].next = eayp->queue;
+ req->list[ix].prev = eayp->queue->list[ix].prev;
+ eayp->queue->list[ix].prev = req;
+ req->list[ix].prev->list[ix].next = req;
+ }
+ }
+}
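/*
 * The queue handling above is a plain circular doubly-linked list with
 * one link pair per scheduler; a generic single-list version of the
 * same insert/unlink steps (hypothetical names) looks like this:
 */
struct sk_node { struct sk_node *next, *prev; };

/* Insert n at the tail, i.e. just before the current head. */
static void sk_enqueue(struct sk_node **queue, struct sk_node *n)
{
    if (!*queue) {
        n->next = n->prev = n;
        *queue = n;
    } else {
        n->next = *queue;
        n->prev = (*queue)->prev;
        (*queue)->prev->next = n;
        (*queue)->prev = n;
    }
}

/* Unlink the head; the list is empty when the head points to itself. */
static struct sk_node *sk_dequeue(struct sk_node **queue)
{
    struct sk_node *n = *queue;
    if (n->next == n)
        *queue = NULL;
    else {
        n->next->prev = n->prev;
        n->prev->next = n->next;
        *queue = n->next;
    }
    return n;
}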
+
+BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
+{
+ Eterm ref = erts_make_ref(BIF_P);
+ ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ,
+ ERTS_ETS_ALL_REQ_SIZE);
+ erts_atomic32_init_nob(&req->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_oiref_storage_save(&req->ref, ref);
+ req->proc = BIF_P;
+ erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ handle_ets_all_request,
+ (void *) req);
+
+ handle_ets_all_request((void *) req);
+ BIF_RET(ref);
+}
/*
** db_slot(Db, Slot) -> [Items].
@@ -2155,9 +2620,8 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_slot_2);
+
/* The slot number is checked in table specific code. */
cret = tb->common.meth->db_slot(BIF_P, tb, BIF_ARG_2, &ret);
db_unlock(tb, LCK_READ);
@@ -2177,41 +2641,53 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
BIF_RETTYPE ets_match_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_1, BIF_ARG_1);
}
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
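    /* ms is now the complete match spec [{Pattern, [], ['$$']}] */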
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
BIF_RETTYPE ets_match_3(BIF_ALIST_3)
{
+ DbTable* tb;
Eterm ms;
+ Sint chunk_size;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2219,34 +2695,35 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_3(BIF_ALIST_3)
{
- return ets_select3(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ DbTable* tb;
+ Sint chunk_size;
+
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_3);
+
+ return ets_select3(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, chunk_size);
}
static BIF_RETTYPE
-ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
+ets_select3(Process* p, DbTable* tb, Eterm tid, Eterm ms, Sint chunk_size)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
Eterm ret;
- Sint chunk_size;
enum DbIterSafety safety;
CHECK_TABLES();
- /* Chunk size strictly greater than 0 */
- if (is_not_small(arg3) || (chunk_size = signed_val(arg3)) <= 0) {
- BIF_ERROR(p, BADARG);
- }
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb,
- arg2, chunk_size,
+ cret = tb->common.meth->db_select_chunk(p, tb, tid,
+ ms, chunk_size,
0 /* not reversed */,
&ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
@@ -2292,9 +2769,8 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_continue_exp);
cret = tb->common.meth->db_select_continue(p, tb, a1,
&ret);
@@ -2324,10 +2800,10 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_1, BIF_ARG_1);
}
-static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
+static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2349,10 +2825,10 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_ERROR(p, BADARG);
}
tptr = tuple_val(arg1);
- if (arityval(*tptr) < 1 ||
- (tb = db_get_table(p, tptr[1], DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ if (arityval(*tptr) < 1)
+ BIF_ERROR(p, BADARG);
+
+ DB_GET_TABLE(tb, tptr[1], DB_READ, LCK_READ, bif_ix, NULL, p);
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
@@ -2388,34 +2864,27 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_RETTYPE ets_select_2(BIF_ALIST_2)
{
- return ets_select2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ DbTable* tb;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_2);
+ return ets_select2(BIF_P, tb, BIF_ARG_1, BIF_ARG_2);
}
static BIF_RETTYPE
-ets_select2(Process* p, Eterm arg1, Eterm arg2)
+ets_select2(Process* p, DbTable* tb, Eterm tid, Eterm ms)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
enum DbIterSafety safety;
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
-
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg2,
- 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, tid, ms, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2458,9 +2927,9 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_count_continue_exp);
cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret);
@@ -2495,18 +2964,14 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_count_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -2532,6 +2997,100 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
return result;
}
+/*
+ ** This is for trapping, cannot be called directly.
+ */
+static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
+{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_WRITE_REC;
+
+ CHECK_TABLES();
+ ASSERT(is_tuple(a1));
+ tptr = tuple_val(a1);
+ ASSERT(arityval(*tptr) >= 1);
+
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_replace_continue_exp);
+
+ cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
+
+ if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+
+BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_select_replace_2);
+
+ if (tb->common.status & DB_BAG) {
+        /* Not supported for bag tables: the bag implementation
+           presented both semantic-consistency and performance issues */
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_replace(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P,tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
{
@@ -2543,13 +3102,8 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
Sint chunk_size;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_3);
/* Chunk size strictly greater than 0 */
if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
@@ -2560,7 +3114,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P,tb,
+ cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1,
BIF_ARG_2, chunk_size,
1 /* reversed */, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2587,7 +3141,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_reverse_1, BIF_ARG_1);
}
BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
@@ -2599,18 +3153,14 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2,
+ cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
1 /*reversed*/, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2637,45 +3187,63 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
/*
-** ets:match_object(Continuation), ets:match_object(Table, Pattern), ets:match_object(Table,Pattern,ChunkSize)
+** ets:match_object(Continuation)
*/
BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_object_1, BIF_ARG_1);
}
+/*
+** ets:match_object(Table, Pattern)
+*/
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
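    /* ms is now the complete match spec [{Pattern, [], ['$_']}] */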
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
+/*
+** ets:match_object(Table,Pattern,ChunkSize)
+*/
BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
{
+ DbTable* tb;
+ Sint chunk_size;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2689,22 +3257,24 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
am_write_concurrency,
- am_read_concurrency};
+ am_read_concurrency,
+ am_id};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
int i;
Eterm* hp;
+ Uint freason;
/*Process* rp = NULL;*/
/* If/when we implement lockless private tables:
Eterm owner;
*/
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_1, NULL);
}
/* If/when we implement lockless private tables:
@@ -2725,7 +3295,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL
|| tb->common.owner != owner) {
if (BIF_P != rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
@@ -2739,7 +3309,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
db_unlock(tb, LCK_READ);
/*if (rp != NULL && rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm));
res = NIL;
@@ -2761,12 +3331,13 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret = THE_NON_VALUE;
+ Uint freason;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_2, NULL);
}
ret = table_info(BIF_P, tb, BIF_ARG_2);
db_unlock(tb, LCK_READ);
@@ -2779,7 +3350,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
BIF_RETTYPE ets_is_compiled_ms_1(BIF_ALIST_1)
{
- if (erts_db_is_compiled_ms(BIF_ARG_1)) {
+ if (erts_db_get_match_prog_binary(BIF_ARG_1)) {
BIF_RET(am_true);
} else {
BIF_RET(am_false);
@@ -2794,9 +3365,9 @@ BIF_RETTYPE ets_match_spec_compile_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
- BIF_RET(erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mp));
+ BIF_RET(erts_db_make_match_prog_ref(BIF_P, mp, &hp));
}
BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
@@ -2805,24 +3376,18 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
int i = 0;
Eterm *hp;
Eterm lst;
- ProcBin *bp;
Binary *mp;
Eterm res;
Uint32 dummy;
- if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) || !is_binary(BIF_ARG_2)) {
+ if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL)) {
error:
BIF_ERROR(BIF_P, BADARG);
}
- bp = (ProcBin*) binary_val(BIF_ARG_2);
- if (thing_subtag(bp->thing_word) != REFC_BINARY_SUBTAG) {
- goto error;
- }
- mp = bp->val;
- if (!IsMatchProgBinary(mp)) {
+ mp = erts_db_get_match_prog_binary(BIF_ARG_2);
+ if (!mp)
goto error;
- }
if (BIF_ARG_1 == NIL) {
BIF_RET(BIF_ARG_3);
@@ -2859,17 +3424,14 @@ int erts_ets_rwmtx_spin_count = -1;
void init_db(ErtsDbSpinCount db_spin_count)
{
- DbTable init_tb;
int i;
- Eterm *hp;
unsigned bits;
size_t size;
-#ifdef ERTS_SMP
int max_spin_count = (1 << 15) - 1; /* internal limit */
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
switch (db_spin_count) {
case ERTS_DB_SPNCNT_NONE:
@@ -2908,23 +3470,13 @@ void init_db(ErtsDbSpinCount db_spin_count)
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- meta_main_tab_locks =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
- sizeof(erts_meta_main_tab_lock_t)
- * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
-
- for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
- "meta_main_tab_slot", make_small(i));
- }
- erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
- "meta_name_tab", make_small(i));
+ erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
-#endif
- erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0);
+ erts_atomic_init_nob(&erts_ets_misc_mem_size, 0);
db_initialize_util();
if (user_requested_db_max_tabs < DB_DEF_MAX_TABS)
@@ -2937,22 +3489,12 @@ void init_db(ErtsDbSpinCount db_spin_count)
        erts_exit(ERTS_ERROR_EXIT,"Max limit for ets tables too high %u (max %u).",
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1;
- meta_main_tab_seq_incr = (((Uint)1)<<bits);
-
- size = sizeof(*meta_main_tab)*db_max_tabs;
- meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
- ERTS_ETS_MISC_MEM_ADD(size);
-
- meta_main_tab_cnt = 0;
- meta_main_tab_top = 0;
- for (i=1; i<db_max_tabs; i++) {
- SET_NEXT_FREE_SLOT(i-1,i);
- }
- SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
- meta_main_tab_first_free = 0;
- meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
+ /*
+     * We don't have any hard limit on the number of tables anymore,
+     * but we use 'db_max_tabs' to determine the size of the name hash table.
+ */
+ meta_name_tab_mask = (((Uint) 1)<<bits) - 1;
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
meta_name_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
ERTS_ETS_MISC_MEM_ADD(size);
@@ -2965,173 +3507,44 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
- /*TT*/
- /* Create meta table invertion. */
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_tab->common.id = NIL;
- meta_pid_to_tab->common.the_name = am_true;
- meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_tab->common.type
- = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_tab->common.keypos = 1;
- meta_pid_to_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0);
- meta_pid_to_tab->common.slot = -1;
- meta_pid_to_tab->common.meth = &db_hash;
- meta_pid_to_tab->common.compress = 0;
-
- erts_refc_init(&meta_pid_to_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
- erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_fixed_tab->common.id = NIL;
- meta_pid_to_fixed_tab->common.the_name = am_true;
- meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_fixed_tab->common.type
- = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_fixed_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_fixed_tab->common.keypos = 1;
- meta_pid_to_fixed_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0);
- meta_pid_to_fixed_tab->common.slot = -1;
- meta_pid_to_fixed_tab->common.meth = &db_hash;
- meta_pid_to_fixed_tab->common.compress = 0;
-
- erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
- &ets_select_delete_1);
+ am_ets, ERTS_MAKE_AM("select_delete_trap"), 1,
+ &ets_select_delete_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_count_continue_exp,
- am_ets, am_atom_put("count_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("count_trap"), 1,
&ets_select_count_1);
/* Non visual BIF to trap to. */
+ erts_init_trap_export(&ets_select_replace_continue_exp,
+ am_ets, ERTS_MAKE_AM("replace_trap"), 1,
+ &ets_select_replace_1);
+
+ /* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_continue_exp,
- am_ets, am_atom_put("select_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("select_trap"), 1,
&ets_select_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("delete_trap"), 1,
&ets_delete_trap);
-
- hp = ms_delete_all_buff;
- ms_delete_all = CONS(hp, am_true, NIL);
- hp += 2;
- ms_delete_all = TUPLE3(hp,am_Underscore,NIL,ms_delete_all);
- hp +=4;
- ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
-#define ARRAY_CHUNK 100
-
-typedef enum {
- ErtsDbProcCleanupProgressTables,
- ErtsDbProcCleanupProgressFixations,
- ErtsDbProcCleanupProgressDone,
-} ErtsDbProcCleanupProgress;
-
-typedef enum {
- ErtsDbProcCleanupOpGetTables,
- ErtsDbProcCleanupOpDeleteTables,
- ErtsDbProcCleanupOpGetFixations,
- ErtsDbProcCleanupOpDeleteFixations,
- ErtsDbProcCleanupOpDone
-} ErtsDbProcCleanupOperation;
-
-typedef struct {
- ErtsDbProcCleanupProgress progress;
- ErtsDbProcCleanupOperation op;
- struct {
- Eterm arr[ARRAY_CHUNK];
- int size;
- int ix;
- int clean_ix;
- } slots;
-} ErtsDbProcCleanupState;
-
-
-static void
-proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
+void
+erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
+{
+ ErtsEtsAllYieldData *eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ eaydp->ongoing = NULL;
+ eaydp->hfrag = NULL;
+ eaydp->tab = NULL;
+ eaydp->queue = NULL;
+ esdp->ets_tables.clist = NULL;
+ erts_atomic_init_nob(&esdp->ets_tables.count, 0);
}
-static void
-proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
-}
/* In: Table LCK_WRITE
** Return TRUE : ok, table not mine and NOT locked anymore.
@@ -3141,7 +3554,6 @@ static int give_away_to_heir(Process* p, DbTable* tb)
{
Process* to_proc;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,p);
Eterm to_pid;
UWord heir_data;
@@ -3162,14 +3574,14 @@ retry:
if (tb->common.owner != p->common.id) {
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
db_unlock(tb,LCK_WRITE);
return !0; /* ok, someone already gave my table away */
}
if (tb->common.heir != to_pid) { /* someone changed the heir */
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
if (to_pid == p->common.id || to_pid == am_none) {
return 0; /* no real heir, table still mine */
@@ -3182,22 +3594,15 @@ retry:
}
if (to_proc->common.u.alive.started_interval
!= tb->common.heir_started_interval) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
- UseTmpHeap(5,p);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
+ delete_owned_table(p, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(5,p);
+ save_owned_table(to_proc, tb);
+
db_unlock(tb,LCK_WRITE);
heir_data = tb->common.heir_data;
if (!is_immed(heir_data)) {
@@ -3205,17 +3610,89 @@ retry:
ASSERT(arityval(*tpv) == 1);
heir_data = tpv[1];
}
- erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf,
- am_ETS_TRANSFER,
- tb->common.id,
- p->common.id,
- heir_data),
- 0);
- erts_smp_proc_unlock(to_proc, to_locks);
+ send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data);
+ erts_proc_unlock(to_proc, to_locks);
return !0;
}
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data)
+{
+ Uint hsz, hd_sz;
+ ErtsMessage *mp;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Eterm tid, hd_copy, msg, sender;
+
+ hsz = 5;
+ if (!is_table_named(tb))
+ hsz += ERTS_MAGIC_REF_THING_SIZE;
+ if (is_immed(heir_data))
+ hd_sz = 0;
+ else {
+ hd_sz = size_object(heir_data);
+ hsz += hd_sz;
+ }
+
+ mp = erts_alloc_message_heap(proc, locks, hsz, &hp, &ohp);
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ if (!hd_sz)
+ hd_copy = heir_data;
+ else
+ hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
+ sender = c_p->common.id;
+ msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, proc, *locks, mp, msg);
+}
+
+
+/* Auto-release fixation from exiting process */
+static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
+{
+ DbTable* tb = btid2tab(fix->tabs.btid);
+ SWord work = 0;
+
+ ASSERT(fix->procs.p == p); (void)p;
+ if (tb) {
+ db_lock(tb, LCK_WRITE_REC);
+ if (!(tb->common.status & DB_DELETE)) {
+ erts_aint_t diff;
+ erts_mtx_lock(&tb->common.fixlock);
+
+ ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
+
+ diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.fix_count,diff,0);
+ fix->counter = 0;
+
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ work += db_unfix_table_hash(&(tb->hash));
+ }
+
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof(DbFixation), 0);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+ }
+
+ erts_bin_release(fix->tabs.btid);
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ++work;
+
+ return work;
+}
+
+
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
@@ -3229,276 +3706,154 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ typedef struct {
+ enum {
+ GET_OWNED_TABLE,
+ FREE_OWNED_TABLE,
+ UNFIX_TABLES,
+ }op;
+ DbTable *tb;
+ } CleanupState;
+ CleanupState *state = (CleanupState *) c_p->u.terminate;
Eterm pid = c_p->common.id;
- ErtsDbProcCleanupState default_state;
- int ret;
+ CleanupState default_state;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p);
+ SWord reds = initial_reds;
if (!state) {
state = &default_state;
- state->progress = ErtsDbProcCleanupProgressTables;
- state->op = ErtsDbProcCleanupOpGetTables;
+ state->op = GET_OWNED_TABLE;
+ state->tb = NULL;
}
- while (!0) {
+ do {
switch (state->op) {
- case ErtsDbProcCleanupOpGetTables:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_tab, LCK_READ);
- if (ret == DB_ERROR_BADKEY) {
- /* Done with tables; now fixations */
- state->progress = ErtsDbProcCleanupProgressFixations;
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata");
- }
-
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteTables;
- /* Fall through */
-
- case ErtsDbProcCleanupOpDeleteTables:
-
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (!IS_SLOT_FREE(ix)) {
- tb = GET_ANY_SLOT_TAB(ix);
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int do_yield;
- db_lock(tb, LCK_WRITE);
- /* Ownership may have changed since
- we looked up the table. */
- if (tb->common.owner != pid) {
- do_yield = 0;
- db_unlock(tb, LCK_WRITE);
- }
- else if (tb->common.heir != am_none
- && tb->common.heir != pid
- && give_away_to_heir(c_p, tb)) {
- do_yield = 0;
- }
- else {
- int first_call;
-#ifdef HARDDEBUG
- erts_fprintf(stderr,
- "erts_db_process_exiting(); Table: %T, "
- "Process: %T\n",
- tb->common.id, pid);
-#endif
- first_call = (tb->common.status & DB_DELETE) == 0;
- if (first_call) {
- /* Clear all access bits. */
- tb->common.status &= ~(DB_PROTECTED
- | DB_PUBLIC
- | DB_PRIVATE);
- tb->common.status |= DB_DELETE;
-
- if (is_atom(tb->common.id))
- remove_named_tab(tb, 0);
-
- free_heir_data(tb);
- free_fixations_locked(tb);
- }
-
- do_yield = free_table_cont(c_p, tb, first_call, 0);
- db_unlock(tb, LCK_WRITE);
- }
- if (do_yield)
- goto yield;
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ case GET_OWNED_TABLE: {
+ DbTable* tb;
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!tb) {
+ /* Done with owned tables; now fixations */
+ state->op = UNFIX_TABLES;
+ break;
+ }
- proc_exit_cleanup_tables_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetTables;
- break;
+ ASSERT(tb != state->tb);
+ state->tb = tb;
+ db_lock(tb, LCK_WRITE);
+ /*
+ * Ownership may have changed since we looked up the table.
+ */
+ if (tb->common.owner != pid) {
+ db_unlock(tb, LCK_WRITE);
+ break;
+ }
+ if (tb->common.heir != am_none
+ && tb->common.heir != pid
+ && give_away_to_heir(c_p, tb)) {
+ break;
+ }
+ /* Clear all access bits. */
+ tb->common.status &= ~(DB_PROTECTED | DB_PUBLIC | DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ if (is_table_named(tb))
+ remove_named_tab(tb, 0);
+
+ free_heir_data(tb);
+ reds -= free_fixations_locked(c_p, tb);
+ tid_clear(c_p, tb);
+ db_unlock(tb, LCK_WRITE);
+ state->op = FREE_OWNED_TABLE;
+ break;
+ }
+ case FREE_OWNED_TABLE:
+ reds = free_table_continue(c_p, state->tb, reds);
+ if (reds < 0)
+ goto yield;
- case ErtsDbProcCleanupOpGetFixations:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_fixed_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ);
-
- if (ret == DB_ERROR_BADKEY) {
- /* Done */
- state->progress = ErtsDbProcCleanupProgressDone;
- state->op = ErtsDbProcCleanupOpDone;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata");
- }
+ state->op = GET_OWNED_TABLE;
+ break;
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteFixations;
- /* Fall through */
+ case UNFIX_TABLES: {
+ DbFixation* fix;
- case ErtsDbProcCleanupOpDeleteFixations:
+ fix = (DbFixation*) erts_psd_get(c_p, ERTS_PSD_ETS_FIXED_TABLES);
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(ix)) {
- tb = meta_main_tab[ix].u.tb;
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int reds = 0;
-
- db_lock(tb, LCK_WRITE_REC);
- if (!(tb->common.status & DB_DELETE)) {
- DbFixation** pp;
-
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
- }
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
- }
- }
- db_unlock(tb, LCK_WRITE_REC);
- BUMP_REDS(c_p, reds);
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ if (!fix) {
+ /* Done */
- proc_exit_cleanup_fixations_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
+ if (state != &default_state)
+ erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
+ c_p->u.terminate = NULL;
- case ErtsDbProcCleanupOpDone:
+ BUMP_REDS(c_p, (initial_reds - reds));
+ return 0;
+ }
- if (state != &default_state)
- erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.terminate = NULL;
- return 0;
+ fixed_tabs_delete(c_p, fix);
+ reds -= proc_cleanup_fixed_table(c_p, fix);
+ break;
+ }
default:
ERTS_DB_INTERNAL_ERROR("Bad internal state");
- }
- }
-
- yield:
+ }
- switch (state->progress) {
- case ErtsDbProcCleanupProgressTables:
- proc_exit_cleanup_tables_meta_data(pid, state);
- break;
- case ErtsDbProcCleanupProgressFixations:
- proc_exit_cleanup_fixations_meta_data(pid, state);
- break;
- default:
- break;
- }
+ } while (reds > 0);
- ASSERT(c_p->u.terminate == (void *) state
- || state == &default_state);
+ yield:
if (state == &default_state) {
c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
- sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.terminate,
- (void*) state,
- sizeof(ErtsDbProcCleanupState));
+ sizeof(CleanupState));
+ sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState));
}
+ else
+ ASSERT(state == c_p->u.terminate);
return !0;
}
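/*
 * The default_state/u.terminate idiom above, as a stand-alone sketch
 * (hypothetical names): run on a stack-allocated state and copy it to
 * heap memory only if the work has to be resumed after a yield.
 */
#include <stdlib.h>
#include <string.h>

struct sk_state { long steps_left; };

static int sk_cleanup(struct sk_state **saved, long reds)
{
    struct sk_state dflt = { 5000 };
    struct sk_state *st = *saved ? *saved : &dflt;

    while (st->steps_left > 0) {
        if (reds-- <= 0) {
            if (st == &dflt) {             /* first yield: persist state */
                *saved = malloc(sizeof *st);
                memcpy(*saved, st, sizeof *st);
            }
            return 1;                      /* not done; call again later */
        }
        st->steps_left--;                  /* one step of real cleanup */
    }
    if (st != &dflt) {
        free(st);
        *saved = NULL;
    }
    return 0;                              /* completely done */
}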
+
/* SMP note: table only needs to be LCK_READ locked */
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
- DeclareTmpHeap(meta_tuple,3,p);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
- erts_refc_inc(&tb->common.ref,1);
- fix = tb->common.fixations;
+ erts_mtx_lock(&tb->common.fixlock);
+ erts_refc_inc(&tb->common.fix_count,1);
+ fix = tb->common.fixing_procs;
if (fix == NULL) {
tb->common.time.monotonic
= erts_get_monotonic_time(erts_proc_sched_data(p));
tb->common.time.offset = erts_get_time_offset();
}
else {
- for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->common.id) {
- ++(fix->counter);
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- return;
- }
+ fix = fixing_procs_rbt_lookup(fix, p);
+ if (fix) {
+ ASSERT(fixed_tabs_find(NULL, fix));
+ ++(fix->counter);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ return;
}
}
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->common.id;
+ fix->tabs.btid = tb->common.btid;
+ erts_refc_inc(&fix->tabs.btid->intern.refc, 2);
+ fix->procs.p = p;
fix->counter = 1;
- fix->next = tb->common.fixations;
- tb->common.fixations = fix;
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- p->flags |= F_USING_DB;
- UseTmpHeap(3,p);
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple,
- p->common.id,
- make_small(tb->common.slot)),
- 0) != DB_ERROR_NONE) {
- UnUseTmpHeap(3,p);
- erts_exit(ERTS_ERROR_EXIT,"Could not insert ets metadata in safe_fixtable.");
- }
- UnUseTmpHeap(3,p);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
+
+ erts_mtx_unlock(&tb->common.fixlock);
+ p->flags |= F_USING_DB;
+
+ fixed_tabs_insert(p, fix);
}
/* SMP note: May re-lock table
@@ -3506,77 +3861,116 @@ static void fix_table_locked(Process* p, DbTable* tb)
static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
- DbFixation** pp;
+ DbFixation* fix;
+
+ erts_mtx_lock(&tb->common.fixlock);
+ fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
+
+ if (fix) {
+ erts_refc_dec(&tb->common.fix_count,0);
+ --(fix->counter);
+ ASSERT(fix->counter >= 0);
+ if (fix->counter == 0) {
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+ erts_mtx_unlock(&tb->common.fixlock);
+ fixed_tabs_delete(p, fix);
+
+ erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
- for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->common.id) {
- DbFixation* fix = *pp;
- erts_refc_dec(&tb->common.ref,0);
- --(fix->counter);
- ASSERT(fix->counter >= 0);
- if (fix->counter > 0) {
- break;
- }
- *pp = fix->next;
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->common.id, make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
goto unlocked;
}
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
-#ifdef ERTS_SMP
+ && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
- if (tb->common.status & DB_DELETE) return;
+ if (tb->common.status & (DB_DELETE|DB_BUSY))
+ return;
}
-#endif
db_unfix_table_hash(&(tb->hash));
}
}
-/* Assume that tb is WRITE locked */
-static void free_fixations_locked(DbTable *tb)
+struct free_fixations_ctx
{
- DbFixation *fix;
- DbFixation *next_fix;
+ Process* p;
+ DbTable* tb;
+ SWord cnt;
+};
+
+static void free_fixations_op(DbFixation* fix, void* vctx)
+{
+ struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx;
+ erts_aint_t diff;
+
+ ASSERT(btid2tab(fix->tabs.btid) == ctx->tb);
+ ASSERT(fix->counter > 0);
+ ASSERT(ctx->tb->common.status & DB_DELETE);
- fix = tb->common.fixations;
- while (fix != NULL) {
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
- next_fix = fix->next;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- fix->pid,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, (void *) fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&ctx->tb->common.fix_count, diff, 0);
- fix = next_fix;
+ if (fix->procs.p != ctx->p) { /* Fixated by other process */
+ fix->counter = 0;
+
+ /* Fake memory stats for table */
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(ctx->tb, sizeof(DbFixation), 0);
+
+ erts_schedule_ets_free_fixation(fix->procs.p->common.id, fix);
+ /*
+         * Either the sys task runs and erts_db_execute_free_fixation()
+         * removes 'fix', or the process exits, drops the sys task, and
+         * proc_cleanup_fixed_table() removes 'fix'.
+ */
+ }
+ else
+ {
+ fixed_tabs_delete(fix->procs.p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ ctx->tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
}
- tb->common.fixations = NULL;
+ ctx->cnt++;
+}
+
+int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
+{
+ ASSERT(fix->counter == 0);
+ fixed_tabs_delete(p, fix);
+
+ erts_bin_release(fix->tabs.btid);
+
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ return 1;
+}
+
+static SWord free_fixations_locked(Process* p, DbTable *tb)
+{
+ struct free_fixations_ctx ctx;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ ctx.p = p;
+ ctx.tb = tb;
+ ctx.cnt = 0;
+ fixing_procs_rbt_foreach_destroy(&tb->common.fixing_procs,
+ free_fixations_op, &ctx);
+ tb->common.fixing_procs = NULL;
+ return ctx.cnt;
}
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
@@ -3640,55 +4034,41 @@ static void free_heir_data(DbTable* tb)
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
- Process *p = BIF_P;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
Eterm cont = BIF_ARG_1;
- int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
ASSERT(*ptr == make_pos_bignum_header(1));
- db_lock(tb, LCK_WRITE);
- trap = free_table_cont(p, tb, 0, 1);
- db_unlock(tb, LCK_WRITE);
-
- if (trap) {
- BIF_TRAP1(&ets_delete_continue_exp, p, cont);
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
/*
- * free_table_cont() returns 0 when done and !0 when more work is needed.
+ * free_table_continue() returns the number of reductions left:
+ *   >= 0 means done,
+ *    < 0 means yield.
*/
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab)
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds)
{
- Eterm result;
- erts_smp_rwmtx_t *mmtl;
+ reds = tb->common.meth->db_free_table_continue(tb, reds);
-#ifdef HARDDEBUG
- if (!first) {
- erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n",
- tb->common.id);
- }
-#endif
-
- result = tb->common.meth->db_free_table_continue(tb);
-
- if (result == 0) {
+ if (reds < 0) {
#ifdef HARDDEBUG
erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n",
tb->common.id);
#endif
/* More work to be done. Let other processes work and call us again. */
- BUMP_ALL_REDS(p);
- return !0;
}
else {
#ifdef HARDDEBUG
@@ -3696,27 +4076,28 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(mmtl);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
- }
-#endif
- free_slot(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
-
- if (clean_meta_tab) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- }
- schedule_free_dbtable(tb);
- BUMP_REDS(p, 100);
- return 0;
+ delete_owned_table(p, tb);
+ table_dec_refc(tb, 0);
}
+ return reds;
+}
+
+struct fixing_procs_info_ctx
+{
+ Process* p;
+ Eterm list;
+};
+
+static void fixing_procs_info_op(DbFixation* fix, void* vctx)
+{
+ struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx;
+ Eterm* hp;
+ Eterm tpl;
+
+ hp = HAllocX(ctx->p, 5, 100);
+ tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter));
+ hp += 3;
+ ctx->list = CONS(hp, tpl, ctx->list);
}
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
@@ -3725,7 +4106,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
int use_monotonic;
if (What == am_size) {
- ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems));
+ ret = make_small(erts_atomic_read_nob(&tb->common.nitems));
} else if (What == am_type) {
if (tb->common.status & DB_SET) {
ret = am_set;
@@ -3738,7 +4119,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_bag;
}
} else if (What == am_memory) {
- Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint));
@@ -3765,17 +4146,20 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else if (What == am_node) {
ret = erts_this_dist_entry->sysname;
} else if (What == am_named_table) {
- ret = is_atom(tb->common.id) ? am_true : am_false;
+ ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
+ } else if (What == am_id) {
+ ret = make_tid(p, tb);
}
+
/*
* For debugging purposes
*/
else if (What == am_data) {
print_table(ERTS_PRINT_STDOUT, NULL, 1, tb);
ret = am_true;
- } else if (What == am_atom_put("fixed",5)) {
+ } else if (ERTS_IS_ATOM_STR("fixed",What)) {
if (IS_FIXED(tb))
ret = am_true;
else
@@ -3784,15 +4168,13 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
= ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
What))
|| ERTS_IS_ATOM_STR("safe_fixed", What)) {
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
+ erts_mtx_lock(&tb->common.fixlock);
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
- Eterm tpl, lst;
- DbFixation *fix;
+ Eterm time;
Sint64 mtime;
+ struct fixing_procs_info_ctx ctx;
need = 3;
if (use_monotonic) {
@@ -3805,19 +4187,15 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
mtime = 0;
need += 4;
}
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- need += 5;
- }
+ ctx.p = p;
+ ctx.list = NIL;
+ fixing_procs_rbt_foreach(tb->common.fixing_procs,
+ fixing_procs_info_op,
+ &ctx);
+
hp = HAlloc(p, need);
- lst = NIL;
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- tpl = TUPLE2(hp,fix->pid,make_small(fix->counter));
- hp += 3;
- lst = CONS(hp,tpl,lst);
- hp += 2;
- }
if (use_monotonic)
- tpl = (IS_SSMALL(mtime)
+ time = (IS_SSMALL(mtime)
? make_small(mtime)
: erts_sint64_to_big(mtime, &hp));
else {
@@ -3825,17 +4203,15 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
erts_make_timestamp_value(&ms, &s, &us,
tb->common.time.monotonic,
tb->common.time.offset);
- tpl = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ time = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
hp += 4;
}
- ret = TUPLE2(hp, tpl, lst);
+ ret = TUPLE2(hp, time, ctx.list);
} else {
ret = am_false;
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- } else if (What == am_atom_put("stats",5)) {
+ erts_mtx_unlock(&tb->common.fixlock);
+ } else if (ERTS_IS_ATOM_STR("stats",What)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
DbHashStats stats;
@@ -3858,7 +4234,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
std_dev_exp = make_float(hp);
PUT_DOUBLE(f, hp);
hp += FLOAT_SIZE_OBJECT;
- ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
+ ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)),
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
make_small(stats.max_chain_len),
@@ -3871,16 +4247,28 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
return ret;
}
-static void print_table(int to, void *to_arg, int show, DbTable* tb)
+static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
{
- erts_print(to, to_arg, "Table: %T\n", tb->common.id);
+ Eterm tid;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
+
+ if (is_table_named(tb)) {
+ tid = tb->common.the_name;
+ } else {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ write_magic_ref_thing(heap, &oh, (ErtsMagicBinary *) tb->common.btid);
+ tid = make_internal_ref(heap);
+ }
+
+ erts_print(to, to_arg, "Table: %T\n", tid);
erts_print(to, to_arg, "Name: %T\n", tb->common.the_name);
tb->common.meth->db_print(to, to_arg, show, tb);
- erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems));
+ erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems));
erts_print(to, to_arg, "Words: %bpu\n",
- (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
@@ -3891,44 +4279,60 @@ static void print_table(int to, void *to_arg, int show, DbTable* tb)
erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
-void db_info(int to, void *to_arg, int show) /* Called by break handler */
+typedef struct {
+ fmtfn_t to;
+ void *to_arg;
+ int show;
+} ErtsPrintDbInfo;
+
+static void
+db_info_print(DbTable *tb, void *vpdbip)
{
- int i;
- for (i=0; i < db_max_tabs; i++)
- if (IS_SLOT_ALIVE(i)) {
- erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner);
- erts_print(to, to_arg, "Slot: %d\n", i);
- print_table(to, to_arg, show, meta_main_tab[i].u.tb);
- }
-#ifdef DEBUG
- erts_print(to, to_arg, "=internal_ets: Process to table index\n");
- print_table(to, to_arg, show, meta_pid_to_tab);
- erts_print(to, to_arg, "=internal_ets: Process to fixation index\n");
- print_table(to, to_arg, show, meta_pid_to_fixed_tab);
-#endif
+ ErtsPrintDbInfo *pdbip = (ErtsPrintDbInfo *) vpdbip;
+ erts_print(pdbip->to, pdbip->to_arg, "=ets:%T\n", tb->common.owner);
+ erts_print(pdbip->to, pdbip->to_arg, "Slot: %bpu\n", (Uint) tb);
+ print_table(pdbip->to, pdbip->to_arg, pdbip->show, tb);
+}
+
+void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */
+{
+ ErtsPrintDbInfo pdbi;
+
+ pdbi.to = to;
+ pdbi.to_arg = to_arg;
+ pdbi.show = show;
+
+ erts_db_foreach_table(db_info_print, &pdbi);
}
Uint
erts_get_ets_misc_mem_size(void)
{
- ERTS_SMP_MEMORY_BARRIER;
+ ERTS_THR_MEMORY_BARRIER;
/* Memory not allocated in ets_alloc */
- return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size);
+ return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size);
}
/* SMP Note: May only be used when system is locked */
void
erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
{
- int i, j;
- j = 0;
- for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) {
- if (IS_SLOT_ALIVE(i)) {
- j++;
- (*func)(meta_main_tab[i].u.tb, arg);
- }
+ int ix;
+
+ ASSERT(erts_thr_progress_is_blocking());
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ DbTable *first = esdp->ets_tables.clist;
+ if (first) {
+ DbTable *tb = first;
+ do {
+ if (is_table_alive(tb))
+ (*func)(tb, arg);
+ tb = tb->common.all.next;
+ } while (tb != first);
+ }
}
- ASSERT(j == meta_main_tab_cnt);
}
/* SMP Note: May only be used when system is locked */
@@ -3947,6 +4351,18 @@ erts_db_get_max_tabs()
return db_max_tabs;
}
+Uint erts_ets_table_count(void)
+{
+ Uint tb_count = 0;
+ Uint six;
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd;
+ tb_count += erts_atomic_read_nob(&esdp->ets_tables.count);
+ }
+ return tb_count;
+}
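
With the global meta table gone, each scheduler keeps its own circular
list of tables plus an atomic counter, and global views such as db_info()
and erts_ets_table_count() are built by aggregating over all schedulers.
A standalone sketch of the summation (plain longs standing in for the
per-scheduler atomics):

    #include <stdio.h>

    #define N_SCHEDULERS 4

    /* Each scheduler updates only its own slot, so table creation and
     * deletion never contend on a shared counter; a global total is
     * computed on demand by summing the slots. */
    static long tables_per_sched[N_SCHEDULERS] = {3, 0, 7, 2};

    static long total_table_count(void)
    {
        long total = 0;
        for (int ix = 0; ix < N_SCHEDULERS; ix++)
            total += tables_per_sched[ix];
        return total;
    }

    int main(void)
    {
        printf("%ld tables\n", total_table_count()); /* prints "12 tables" */
        return 0;
    }
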
+
/*
* For testing of meta tables only.
*
@@ -3967,7 +4383,7 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
while (index >= atom_table_size()) {
char tmp[20];
erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
- erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
}
list = CONS(hp, make_atom(index), list);
hp += 2;
@@ -3978,23 +4394,50 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
-#ifdef HARDDEBUG /* Here comes some debug functions */
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_lcnt_install_new_lock_info(&tb->common.fixlock.lcnt, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(&tb->common.rwlock.lcnt);
+ erts_lcnt_uninstall(&tb->common.fixlock.lcnt);
+ }
-void db_check_tables(void)
-{
-#ifdef ERTS_SMP
- return;
-#else
- int i;
+ if(IS_HASH_TABLE(tb->common.status)) {
+ erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ }
+}
- for (i = 0; i < db_max_tabs; i++) {
- if (IS_SLOT_ALIVE(i)) {
- DbTable* tb = meta_main_tab[i].t;
- tb->common.meth->db_check_table(tb);
- }
+static void lcnt_update_db_locks_per_sched(void *enable) {
+ ErtsSchedulerData *esdp;
+ DbTable *head;
+
+ esdp = erts_get_scheduler_data();
+ head = esdp->ets_tables.clist;
+
+ if(head) {
+ DbTable *iterator = head;
+
+ do {
+ if(is_table_alive(iterator)) {
+ erts_lcnt_enable_db_lock_count(iterator, !!enable);
+ }
+
+ iterator = iterator->common.all.next;
+ } while (iterator != head);
}
-#endif
}
-#endif /* HARDDEBUG */
+void erts_lcnt_update_db_locks(int enable) {
+ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
+ &lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+#ifdef ETS_DBG_FORCE_TRAP
+erts_aint_t erts_ets_dbg_force_trap = 0;
+#endif
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 1d26c49652..23975d208f 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,8 +24,37 @@
*
*/
-#ifndef __DB_H__
-#define __DB_H__
+#ifndef ERTS_DB_SCHED_SPEC_TYPES__
+#define ERTS_DB_SCHED_SPEC_TYPES__
+
+union db_table;
+typedef union db_table DbTable;
+
+typedef struct ErtsEtsAllReq_ ErtsEtsAllReq;
+
+typedef struct {
+ ErtsEtsAllReq *next;
+ ErtsEtsAllReq *prev;
+} ErtsEtsAllReqList;
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllYieldData;
+
+typedef struct {
+ erts_atomic_t count;
+ DbTable *clist;
+} ErtsEtsTables;
+
+#endif /* ERTS_DB_SCHED_SPEC_TYPES__ */
+
+#ifndef ERTS_ONLY_SCHED_SPEC_ETS_DATA
+
+#ifndef ERL_DB_H__
+#define ERL_DB_H__
#include "sys.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -40,12 +69,19 @@
/*TT*/
Uint erts_get_ets_misc_mem_size(void);
+Uint erts_ets_table_count(void);
typedef struct {
DbTableCommon common;
ErtsThrPrgrLaterOp data;
} DbTableRelease;
+struct ErtsSchedulerData_;
+int erts_handle_yielded_ets_all_request(struct ErtsSchedulerData_ *esdp,
+ ErtsEtsAllYieldData *eadp);
+
+void erts_ets_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
/*
* So, the structure for a database table, NB this is only
* interesting in db.c.
@@ -58,7 +94,7 @@ union db_table {
/*TT*/
};
-#define DB_DEF_MAX_TABS 2053 /* Superseeded by environment variable
+#define DB_DEF_MAX_TABS 8192 /* Superseded by environment variable
"ERL_MAX_ETS_TABLES" */
#define ERL_MAX_ETS_TABLES_ENV "ERL_MAX_ETS_TABLES"
@@ -74,7 +110,8 @@ typedef enum {
void init_db(ErtsDbSpinCount);
int erts_db_process_exiting(Process *, ErtsProcLocks);
-void db_info(int, void *, int);
+int erts_db_execute_free_fixation(Process*, DbFixation*);
+void db_info(fmtfn_t, void *, int);
void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
void erts_db_foreach_offheap(DbTable *,
void (*func)(ErlOffHeap *, void *),
@@ -86,15 +123,25 @@ extern int erts_ets_realloc_always_moves; /* set in erl_init */
extern int erts_ets_always_compress; /* set in erl_init */
extern Export ets_select_delete_continue_exp;
extern Export ets_select_count_continue_exp;
+extern Export ets_select_replace_continue_exp;
extern Export ets_select_continue_exp;
-extern erts_smp_atomic_t erts_ets_misc_mem_size;
+extern erts_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
-
Uint erts_db_get_max_tabs(void);
+Eterm erts_db_make_tid(Process *c_p, DbTableCommon *tb);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
+void erts_lcnt_update_db_locks(int enable);
#endif
+#ifdef ETS_DBG_FORCE_TRAP
+extern erts_aint_t erts_ets_dbg_force_trap;
+#endif
+
+#endif /* ERL_DB_H__ */
+
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
#define ERTS_HAVE_DB_INTERNAL__
@@ -110,11 +157,11 @@ do { \
erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
- ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
- erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
+ erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
} while (0)
#define ERTS_ETS_MISC_MEM_ADD(SZ) \
- erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ));
+ erts_atomic_add_nob(&erts_ets_misc_mem_size, (SZ));
ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type,
DbTable *tab,
@@ -251,7 +298,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size)
ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0);
ASSERT(((void *) tab) != ptr
- || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0);
+ || erts_atomic_read_nob(&tab->common.memory_size) == 0);
erts_free(type, ptr);
}
@@ -267,7 +314,6 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#undef ERTS_DB_ALC_MEM_UPDATE_
-
#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
+#endif /* !ERTS_ONLY_SCHED_SPEC_ETS_DATA */
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 5e6fe4f460..752d3ae3a8 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
/*
** Implementation of unordered ETS tables.
** The tables are implemented as linear dynamic hash tables.
+** https://en.wikipedia.org/wiki/Linear_hashing
*/
/* SMP:
@@ -84,46 +85,47 @@
#include "erl_db_hash.h"
-#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */
-# define IF_DEBUG(x) x
-# define MY_ASSERT(x) ASSERT(x)
-#else
-# define IF_DEBUG(x)
-# define MY_ASSERT(x)
-#endif
-
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
-#define CHAIN_LEN 6 /* Medium bucket chain len */
+#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1)
+#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
+
+/*
+** We want the first mandatory segment to be small (to reduce the minimal
+** memory footprint) and the extra segments to be larger (to reduce the
+** number of alloc/free calls).
+*/
-/* Number of slots per segment */
-#define SEGSZ_EXP 8
-#define SEGSZ (1 << SEGSZ_EXP)
-#define SEGSZ_MASK (SEGSZ-1)
+/* Number of slots in first segment */
+#define FIRST_SEGSZ_EXP 8
+#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP)
+#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1)
-#define NSEG_1 2 /* Size of first segment table (must be at least 2) */
+/* Number of slots per extra segment */
+#define EXT_SEGSZ_EXP 11
+#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP)
+#define EXT_SEGSZ_MASK (EXT_SEGSZ-1)
+
+#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*))
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#ifdef ERTS_SMP
# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED)
-#else
-# define DB_USING_FINE_LOCKING(TB) 0
-#endif
#ifdef ETHR_ORDERED_READ_DEPEND
-#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))
#else
#define SEGTAB(tb) \
(DB_USING_FINE_LOCKING(tb) \
- ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \
- : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)))
+ ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \
+ : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
#endif
-#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
-#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
+#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
+
+#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK]
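
With a 256-slot first segment and 2048-slot extra segments, a flat slot
index can no longer be split into segment/offset with a single shift;
SLOT_IX_TO_SEG_IX() compensates by offsetting the index with
EXT_SEGSZ - FIRST_SEGSZ before shifting. A self-contained check of the
arithmetic:

    #include <assert.h>

    /* Same constants as in the patch above. */
    #define FIRST_SEGSZ_EXP 8
    #define FIRST_SEGSZ     (1 << FIRST_SEGSZ_EXP)   /* 256  */
    #define EXT_SEGSZ_EXP   11
    #define EXT_SEGSZ       (1 << EXT_SEGSZ_EXP)     /* 2048 */

    #define SLOT_IX_TO_SEG_IX(i) \
        (((i) + (EXT_SEGSZ - FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)

    int main(void)
    {
        /* Slots 0..255 live in the small first segment... */
        assert(SLOT_IX_TO_SEG_IX(0) == 0);
        assert(SLOT_IX_TO_SEG_IX(FIRST_SEGSZ - 1) == 0);
        /* ...and each following 2048-slot range maps to one extra segment. */
        assert(SLOT_IX_TO_SEG_IX(FIRST_SEGSZ) == 1);
        assert(SLOT_IX_TO_SEG_IX(FIRST_SEGSZ + EXT_SEGSZ - 1) == 1);
        assert(SLOT_IX_TO_SEG_IX(FIRST_SEGSZ + EXT_SEGSZ) == 2);
        return 0;
    }
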
/*
* When deleting a table, the number of records to delete.
@@ -137,110 +139,129 @@
static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
Uint mask = (DB_USING_FINE_LOCKING(tb)
- ? erts_smp_atomic_read_acqb(&tb->szm)
- : erts_smp_atomic_read_nob(&tb->szm));
+ ? erts_atomic_read_acqb(&tb->szm)
+ : erts_atomic_read_nob(&tb->szm));
Uint ix = hval & mask;
- if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) {
+ if (ix >= erts_atomic_read_nob(&tb->nactive)) {
ix &= mask>>1;
- ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive));
+ ASSERT(ix < erts_atomic_read_nob(&tb->nactive));
}
return ix;
}
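
hash_to_ix() is the classic linear-hashing address computation: mask the
hash with the full size mask, and if the selected bucket has not been
activated (split) yet, fold the index back into the lower half using half
the mask. A standalone model of the fold:

    #include <assert.h>

    /* 'szm' is the current size mask (2^k - 1), 'nactive' the number of
     * active buckets; buckets at or beyond nactive do not exist yet, so
     * those indices are folded into the not-yet-split lower half. */
    static unsigned hash_to_ix(unsigned hval, unsigned szm, unsigned nactive)
    {
        unsigned ix = hval & szm;
        if (ix >= nactive)
            ix &= szm >> 1;
        return ix;
    }

    int main(void)
    {
        /* 6 active buckets, growing towards 8: the mask is 7. */
        assert(hash_to_ix(5, 7, 6) == 5); /* bucket 5 already exists */
        assert(hash_to_ix(6, 7, 6) == 2); /* bucket 6 not split yet: fold to 2 */
        assert(hash_to_ix(7, 7, 6) == 3); /* bucket 7 not split yet: fold to 3 */
        return 0;
    }
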
-/* Remember a slot containing a pseudo-deleted item (INVALID_HASH)
- * Return false if we got raced by unfixing thread
- * and the object should be deleted for real.
- */
-static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
- erts_aint_t fixated_by_me)
+
+static ERTS_INLINE FixedDeletion* alloc_fixdel(DbTableHash* tb)
{
- erts_aint_t was_next;
- erts_aint_t exp_next;
FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- sizeof(FixedDeletion));
+ (DbTable *) tb,
+ sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
- fixd->slot = ix;
- was_next = erts_smp_atomic_read_acqb(&tb->fixdel);
+ return fixd;
+}
+
+static ERTS_INLINE void free_fixdel(DbTableHash* tb, FixedDeletion* fixd)
+{
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb,
+ fixd, sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+}
+
+static ERTS_INLINE int link_fixdel(DbTableHash* tb,
+ FixedDeletion* fixd,
+ erts_aint_t fixated_by_me)
+{
+ erts_aint_t was_next;
+ erts_aint_t exp_next;
+
+ was_next = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic insertion in linked list: */
if (NFIXED(tb) <= fixated_by_me) {
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb,
- fixd, sizeof(FixedDeletion));
+ free_fixdel(tb, fixd);
return 0; /* raced by unfixer */
}
exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg_mb(&tb->fixdel,
+ was_next = erts_atomic_cmpxchg_mb(&tb->fixdel,
(erts_aint_t) fixd,
exp_next);
}while (was_next != exp_next);
return 1;
}
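
link_fixdel() publishes the node with the usual lock-free list push: read
the head, link the node, and compare-and-swap, retrying if another thread
raced the head in between. A standalone C11 sketch of the push itself
(the real function additionally re-checks NFIXED() on every iteration and
frees the node if the table was concurrently unfixed):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; int slot; };

    static _Atomic(struct node *) list_head;

    static void push(struct node *n)
    {
        struct node *expected =
            atomic_load_explicit(&list_head, memory_order_acquire);
        do {
            /* Link before publishing; on CAS failure 'expected' is
             * refreshed with the current head and we re-link. */
            n->next = expected;
        } while (!atomic_compare_exchange_weak(&list_head, &expected, n));
    }
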
+/* Remember a slot containing a pseudo-deleted item
+ * Return false if we got raced by unfixing thread
+ * and the object should be deleted for real.
+ */
+static int add_fixed_deletion(DbTableHash* tb, int ix,
+ erts_aint_t fixated_by_me)
+{
+ FixedDeletion* fixd = alloc_fixdel(tb);
+ fixd->slot = ix;
+ fixd->all = 0;
+ return link_fixdel(tb, fixd, fixated_by_me);
+}
+
+
+static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
+{
+ return p->pseudo_deleted;
+}
-#define MAX_HASH 0xEFFFFFFFUL
-#define INVALID_HASH 0xFFFFFFFFUL
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_internal_hash(term)) % MAX_HASH)
+ make_internal_hash(term, 0)) & MAX_HASH_MASK)
-#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
-static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rlock(lck);
+ erts_rwmtx_rlock(lck);
return lck;
}
}
/* Fine grained write lock */
-static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rwlock(lck);
+ erts_rwmtx_rwlock(lck);
return lck;
}
}
-static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_runlock(lck);
+ erts_rwmtx_runlock(lck);
}
}
-static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_rwunlock(lck);
+ erts_rwmtx_rwunlock(lck);
}
}
-#else /* ERTS_SMP */
-# define RLOCK_HASH(tb,hval) NULL
-# define WLOCK_HASH(tb,hval) NULL
-# define RUNLOCK_HASH(lck) ((void)lck)
-# define WUNLOCK_HASH(lck) ((void)lck)
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_CHECK
# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
-# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
-# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck))
-# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
+# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck))
+# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
#else
# define IS_HASH_RLOCKED(tb,hval) (1)
# define IS_HASH_WLOCKED(tb,hval) (1)
@@ -253,47 +274,44 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
** Slot READ locks updated accordingly, unlocked if EOT.
*/
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
return ix;
-#else
- return (++ix < NACTIVE(tb)) ? ix : 0;
-#endif
}
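
next_slot() (and its write-locking twin below) walks the table stripe by
stripe rather than sequentially: every slot guarded by one of the
DB_HASH_LOCK_CNT rwlocks is visited under a single lock acquisition
before the traversal moves on to the next stripe, and a wrap back to
index 0 signals end of table. A standalone model of the index arithmetic,
with a small stand-in lock count:

    #include <stdio.h>

    #define LOCK_CNT 4  /* stand-in; the real DB_HASH_LOCK_CNT is larger */

    static int next_slot(int ix, int nactive)
    {
        ix += LOCK_CNT;                  /* next slot under the same lock */
        if (ix < nactive) return ix;
        ix = (ix + 1) & (LOCK_CNT - 1);  /* first slot of the next stripe */
        return ix;                       /* 0 means the traversal is done */
    }

    int main(void)
    {
        /* Visits 10 slots in stripe order: 0 4 8 1 5 9 2 6 3 7 */
        for (int ix = 0; ;) {
            printf("%d ", ix);
            ix = next_slot(ix, 10);
            if (ix == 0) break;
        }
        putchar('\n');
        return 0;
    }
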
/* Same as next_slot but with WRITE locking */
static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
WUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
return ix;
-#else
- return next_slot(tb,ix,lck_ptr);
-#endif
}
-/*
- * Some special binary flags
- */
-#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
-
static ERTS_INLINE void free_term(DbTableHash *tb, HashDbTerm* p)
{
db_free_term((DbTable*)tb, p, offsetof(HashDbTerm, dbterm));
}
+static ERTS_INLINE void free_term_list(DbTableHash *tb, HashDbTerm* p)
+{
+ while (p) {
+ HashDbTerm* next = p->next;
+ free_term(tb, p);
+ p = next;
+ }
+}
+
+
/*
* Local types
*/
@@ -303,9 +321,6 @@ struct mp_prefound {
};
struct mp_info {
- int all_objects; /* True if complete objects are always
- * returned from the match_spec (can use
- * copy_shallow on the return value) */
int something_can_match; /* The match_spec is not "impossible" */
int key_given;
struct mp_prefound dlists[10]; /* Default list of "pre-found" buckets */
@@ -318,83 +333,52 @@ struct mp_info {
/* A table segment */
struct segment {
- HashDbTerm* buckets[SEGSZ];
-#ifdef MYDEBUG
- int is_ext_segment;
-#endif
+ HashDbTerm* buckets[1];
};
+#define SIZEOF_SEGMENT(N) \
+ (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N))
-/* A segment that also contains a segment table */
-struct ext_segment {
- struct segment s; /* The segment itself. Must be first */
-
+/* An extended segment table */
+struct ext_segtab {
+ ErtsThrPrgrLaterOp lop;
struct segment** prev_segtab; /* Used when table is shrinking */
- int nsegs; /* Size of segtab */
+ int prev_nsegs; /* Size of prev_segtab */
+ int nsegs; /* Size of this segtab */
struct segment* segtab[1]; /* The segment table */
};
-#define SIZEOF_EXTSEG(NSEGS) \
- (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-
-#if defined(DEBUG) || defined(VALGRIND)
-# define EXTSEG(SEGTAB_PTR) \
- ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
-#endif
+#define SIZEOF_EXT_SEGTAB(NSEGS) \
+ (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS))
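
SIZEOF_SEGMENT and SIZEOF_EXT_SEGTAB both use the offsetof idiom for a
variable-length trailing array, so each segment is a single allocation
sized for exactly the wanted number of buckets. A minimal standalone
illustration of the idiom:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct segment { void *buckets[1]; };  /* trailing array, sized at runtime */

    #define SIZEOF_SEGMENT(N) \
        (offsetof(struct segment, buckets) + sizeof(void *) * (N))

    int main(void)
    {
        struct segment *seg = calloc(1, SIZEOF_SEGMENT(256));
        if (!seg)
            return 1;
        printf("%zu bytes for 256 buckets\n", SIZEOF_SEGMENT(256));
        free(seg);
        return 0;
    }
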
static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
struct segment** segtab)
{
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
else
- erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
-#ifdef VALGRIND
- tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
-#endif
+ erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
}
-
-/* How the table segments relate to each other:
-
- ext_segment: ext_segment: "plain" segment
- #=================# #================# #=============#
- | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] |
- | bucket[1] | | | | [257] | | | [513] |
- : : | | : : | : :
- | bucket[255] | | | | [511] | | | [767] |
- |-----------------| | | |----------------| | #=============#
- | prev_segtab=NULL| | | +--<---prev_segtab | |
- | nsegs = 2 | | | | | nsegs = 256 | |
-+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ |
-| | segtab[1] -->-----------+---|--<---segtab[1] | | |
-| #=================# | | segtab[2] -->-----|--+ ext_segment:
-| | : : | #================#
-+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]|
- #================# | | |
- | : :
- | |----------------|
- +----<---prev_segtab |
- : :
-*/
-
+/* Used by select_replace on analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
/*
** Forward decl's (static functions)
*/
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab);
-static int alloc_seg(DbTableHash *tb);
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
+static void alloc_seg(DbTableHash *tb);
static int free_seg(DbTableHash *tb, int free_records);
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
- HashDbTerm *list);
+static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+ HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
-static void shrink(DbTableHash* tb, int nactive);
-static void grow(DbTableHash* tb, int nactive);
+static void shrink(DbTableHash* tb, int nitems);
+static void grow(DbTableHash* tb, int nitems);
static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
Uint sz, DbTableHash*);
-static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi);
+static int analyze_pattern(DbTableHash *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
/*
* Method interface functions
@@ -418,39 +402,44 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_hash(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret);
-static int db_select_hash(Process *p, DbTable *tbl,
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret);
-static int db_select_count_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-static int db_select_delete_hash(Process *p, DbTable *tbl,
- Eterm pattern, Eterm *ret);
-
-static int db_select_continue_hash(Process *p, DbTable *tbl,
+static int db_select_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_count_continue_hash(Process *p, DbTable *tbl,
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_count_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+
static int db_take_hash(Process *, DbTable *, Eterm, Eterm *);
-static void db_print_hash(int to,
+static void db_print_hash(fmtfn_t to,
void *to_arg,
int show,
DbTable *tbl);
-static int db_free_table_hash(DbTable *tbl);
+static int db_free_empty_table_hash(DbTable *tbl);
-static int db_free_table_continue_hash(DbTable *tbl);
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
static void db_foreach_offheap_hash(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
@@ -463,9 +452,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
int nactive = NACTIVE(tb);
- if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ int nitems = NITEMS(tb);
+ if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)
&& !IS_FIXED(tb)) {
- shrink(tb, nactive);
+ shrink(tb, nitems);
}
}
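
GROW_LIMIT and SHRINK_LIMIT replace the old CHAIN_LEN-based thresholds:
the table now grows once it holds more items than active buckets (a
target average chain length of about one) and shrinks only when less
than half full. The gap between the two limits is deliberate hysteresis,
as in this sketch:

    /* Growing above an average chain length of 1 but shrinking only
     * below 1/2 leaves a band in which neither triggers, so a workload
     * hovering at one boundary cannot thrash between grow() and
     * shrink(). */
    static int should_grow(int nitems, int nactive)   { return nitems > nactive; }
    static int should_shrink(int nitems, int nactive) { return nitems < nactive / 2; }
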
@@ -474,7 +464,8 @@ static ERTS_INLINE void try_shrink(DbTableHash* tb)
static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval) return 0;
+ if (b->hvalue != hval || is_pseudo_deleted(b))
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -487,7 +478,8 @@ static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
static ERTS_INLINE int has_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval && b->hvalue != INVALID_HASH) return 0;
+ if (b->hvalue != hval)
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -547,17 +539,14 @@ DbTableMethod db_hash =
db_select_delete_continue_hash,
db_select_count_hash,
db_select_count_continue_hash,
+ db_select_replace_hash,
+ db_select_replace_continue_hash,
db_take_hash,
db_delete_all_objects_hash,
- db_free_table_hash,
+ db_free_empty_table_hash,
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
-#ifdef HARDDEBUG
- db_check_table_hash,
-#else
- NULL,
-#endif
db_lookup_dbterm_hash,
db_finalize_dbterm_hash
};
@@ -579,7 +568,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ if (erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
(erts_aint_t) NULL) != (erts_aint_t) NULL) {
/* Oboy, must join lists */
@@ -588,13 +577,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
erts_aint_t exp_tail;
while (last->next != NULL) last = last->next;
- was_tail = erts_smp_atomic_read_acqb(&tb->fixdel);
+ was_tail = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic list insertion */
exp_tail = was_tail;
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
exp_tail);
}while (was_tail != exp_tail);
@@ -605,97 +594,110 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
** Table interface routines ie what's called by the bif's
*/
-void db_unfix_table_hash(DbTableHash *tb)
+SWord db_unfix_table_hash(DbTableHash *tb)
{
FixedDeletion* fixdel;
+ SWord work = 0;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
- || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
- && !tb->common.is_thread_safe));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
+ && !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg_mb(&tb->fixdel,
- (erts_aint_t) NULL);
- while (fixdel != NULL) {
- FixedDeletion *fx = fixdel;
- int ix = fx->slot;
- HashDbTerm **bp;
- HashDbTerm *b;
- erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix);
-
- if (IS_FIXED(tb)) { /* interrupted by fixer */
- WUNLOCK_HASH(lck);
- restore_fixdel(tb,fixdel);
- if (!IS_FIXED(tb)) {
- goto restart; /* unfixed again! */
- }
- return;
- }
- if (ix < NACTIVE(tb)) {
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- while (b != NULL) {
- if (b->hvalue == INVALID_HASH) {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
- } else {
- bp = &b->next;
- b = b->next;
- }
- }
- }
- /* else slot has been joined and purged by shrink() */
- WUNLOCK_HASH(lck);
- fixdel = fx->next;
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- (void *) fx,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel,
+ (erts_aint_t) NULL);
+ while (fixdel) {
+ FixedDeletion *free_me;
+
+ do {
+ HashDbTerm **bp;
+ HashDbTerm *b;
+ HashDbTerm *free_us = NULL;
+ erts_rwmtx_t* lck;
+
+ lck = WLOCK_HASH(tb, fixdel->slot);
+
+ if (IS_FIXED(tb)) { /* interrupted by fixer */
+ WUNLOCK_HASH(lck);
+ restore_fixdel(tb,fixdel);
+ if (!IS_FIXED(tb)) {
+ goto restart; /* unfixed again! */
+ }
+ return work;
+ }
+ if (fixdel->slot < NACTIVE(tb)) {
+ bp = &BUCKET(tb, fixdel->slot);
+ b = *bp;
+
+ while (b != NULL) {
+ if (is_pseudo_deleted(b)) {
+ HashDbTerm* nxt = b->next;
+ b->next = free_us;
+ free_us = b;
+ work++;
+ b = *bp = nxt;
+ } else {
+ bp = &b->next;
+ b = b->next;
+ }
+ }
+ }
+ /* else slot has been joined and purged by shrink() */
+ WUNLOCK_HASH(lck);
+ free_term_list(tb, free_us);
+
+ }while (fixdel->all && fixdel->slot-- > 0);
+
+ free_me = fixdel;
+ fixdel = fixdel->next;
+ free_fixdel(tb, free_me);
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
+
+ return work;
}
int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
- erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
- erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
- erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
- SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
+ erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
+ erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, tb->first_segtab);
tb->nsegs = NSEG_1;
- tb->nslots = SEGSZ;
+ tb->nslots = FIRST_SEGSZ;
+ tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(FIRST_SEGSZ));
+ sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
-#ifdef ERTS_SMP
+ erts_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
int i;
if (tb->common.type & DB_FREQ_READ)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
- (DbTable *) tb,
- sizeof(DbTableHashFineLocks));
+ tb->locks = (DbTableHashFineLocks*) erts_db_alloc(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
+ (DbTable *) tb,
+ sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
- "db_hash_slot", make_small(i));
+ erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
- /* This important property is needed to guarantee that the buckets
+ /* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation are protected by the same lock:
*/
- ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
}
else { /* coarse locking */
tb->locks = NULL;
}
ERTS_THR_MEMORY_BARRIER;
-#endif /* ERST_SMP */
return DB_ERROR_NONE;
}
@@ -703,22 +705,12 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
+ erts_rwmtx_t* lck = RLOCK_HASH(tb,ix);
HashDbTerm* list;
- for (;;) {
- list = BUCKET(tb,ix);
- if (list != NULL) {
- if (list->hvalue == INVALID_HASH) {
- list = next(tb,&ix,&lck,list);
- }
- break;
- }
- if ((ix=next_slot(tb,ix,&lck)) == 0) {
- list = NULL;
- break;
- }
- }
+ list = BUCKET(tb,ix);
+ list = next_live(tb, &ix, &lck, list);
+
if (list != NULL) {
*ret = db_copy_key(p, tbl, &list->dbterm);
RUNLOCK_HASH(lck);
@@ -736,7 +728,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
Uint ix;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
@@ -755,13 +747,13 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
}
/* Key found */
- b = next(tb, &ix, &lck, b);
+ b = next_live(tb, &ix, &lck, b->next);
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b != 0) {
if (!has_live_key(tb, b, key, hval)) {
break;
}
- b = next(tb, &ix, &lck, b);
+ b = next_live(tb, &ix, &lck, b->next);
}
}
if (b == NULL) {
@@ -783,7 +775,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* q;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nitems;
int ret = DB_ERROR_NONE;
@@ -808,8 +800,9 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
*/
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
- if (b->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ if (is_pseudo_deleted(b)) {
+ erts_atomic_inc_nob(&tb->common.nitems);
+ b->pseudo_deleted = 0;
}
else if (key_clash_fail) {
ret = DB_ERROR_BADKEY;
@@ -817,14 +810,14 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
}
q = replace_dbterm(tb, b, obj);
q->next = bnext;
- q->hvalue = hval; /* In case of INVALID_HASH */
+ ASSERT(q->hvalue == hval);
*bp = q;
goto Ldone;
}
else if (key_clash_fail) { /* && (DB_BAG || DB_DUPLICATE_BAG) */
q = b;
do {
- if (q->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(q)) {
ret = DB_ERROR_BADKEY;
goto Ldone;
}
@@ -836,9 +829,10 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
q = b;
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
- if (q->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
- q->hvalue = hval;
+ if (is_pseudo_deleted(q)) {
+ erts_atomic_inc_nob(&tb->common.nitems);
+ q->pseudo_deleted = 0;
+ ASSERT(q->hvalue == hval);
if (q != b) { /* must move to preserve key insertion order */
*qp = q->next;
q->next = b;
@@ -856,17 +850,17 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
Lnew:
q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = b;
*bp = q;
- nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
}
- CHECK_TABLES();
return DB_ERROR_NONE;
Ldone:
@@ -884,14 +878,13 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b2 && has_key(tb, b2, key, hval)) {
- if (b2->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(b2))
sz += b2->dbterm.size + 2;
b2 = b2->next;
}
}
copy = build_term_list(p, b1, b2, sz, tb);
- CHECK_TABLES();
if (bend) {
*bend = b2;
}
@@ -904,7 +897,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
int ix;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
@@ -923,70 +916,6 @@ done:
RUNLOCK_HASH(lck);
return DB_ERROR_NONE;
}
-
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm* b1;
- int num = 0;
- int retval;
- erts_smp_rwmtx_t* lck;
-
- ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */
-
- hval = MAKE_HASH(key);
- lck = RLOCK_HASH(tb, hval);
- ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- HashDbTerm* b;
- HashDbTerm* b2 = b1->next;
-
- while(b2 != NULL && has_live_key(tb,b2,key,hval)) {
- if (ndex > arityval(b2->dbterm.tpl[0])) {
- retval = DB_ERROR_BADITEM;
- goto done;
- }
- b2 = b2->next;
- }
-
- b = b1;
- while(b != b2) {
- if (num < *num_ret) {
- ret[num++] = b->dbterm.tpl[ndex];
- } else {
- retval = DB_ERROR_NONE;
- goto done;
- }
- b = b->next;
- }
- *num_ret = num;
- }
- else {
- ASSERT(*num_ret > 0);
- ret[0] = b1->dbterm.tpl[ndex];
- *num_ret = 1;
- }
- retval = DB_ERROR_NONE;
- goto done;
- }
- b1 = b1->next;
- }
- retval = DB_ERROR_BADKEY;
-done:
- RUNLOCK_HASH(lck);
- return retval;
-}
-
static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
@@ -994,7 +923,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
ix = hash_to_ix(tb, hval);
@@ -1023,7 +952,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int retval;
hval = MAKE_HASH(key);
@@ -1045,7 +974,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
while(b2 != NULL && has_key(tb,b2,key,hval)) {
if (ndex > arityval(b2->dbterm.tpl[0])
- && b2->hvalue != INVALID_HASH) {
+ && !is_pseudo_deleted(b2)) {
retval = DB_ERROR_BADITEM;
goto done;
}
@@ -1053,7 +982,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
}
b = b1;
while(b != b2) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
Eterm *hp;
Eterm copy = db_copy_element_from_ets(&tb->common, p,
&b->dbterm, ndex, &hp, 2);
@@ -1079,54 +1008,6 @@ done:
}
/*
- * Very internal interface, removes elements of arity two from
- * BAG. Used for the PID meta table
- */
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm** bp;
- HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
- int found = 0;
-
- hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- ASSERT(!IS_FIXED(tb));
- ASSERT((tb->common.status & DB_BAG));
- ASSERT(!tb->common.compress);
-
- while(b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- found = 1;
- if ((arityval(b->dbterm.tpl[0]) == 2) &&
- EQ(value, b->dbterm.tpl[2])) {
- *bp = b->next;
- free_term(tb, b);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- b = *bp;
- break;
- }
- } else if (found) {
- break;
- }
- bp = &b->next;
- b = b->next;
- }
- WUNLOCK_HASH(lck);
- if (found) {
- try_shrink(tb);
- }
- return DB_ERROR_NONE;
-}
-
-/*
** NB, this is for the db_erase/2 bif.
*/
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
@@ -1136,7 +1017,8 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ HashDbTerm* free_us = NULL;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
hval = MAKE_HASH(key);
@@ -1151,16 +1033,17 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
if (nitems_diff == -1 && IS_FIXED(tb)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
continue;
}
}
else {
- if (nitems_diff && b->hvalue != INVALID_HASH)
+ if (nitems_diff && !is_pseudo_deleted(b))
break;
}
bp = &b->next;
@@ -1168,9 +1051,10 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
@@ -1185,7 +1069,8 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ HashDbTerm* free_us = NULL;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
int nkeys = 0;
Eterm key;
@@ -1203,13 +1088,14 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
if (db_eq(&tb->common,object, &b->dbterm)) {
--nitems_diff;
if (nkeys==1 && IS_FIXED(tb) && add_fixed_deletion(tb,ix,0)) {
- b->hvalue = INVALID_HASH; /* Pseudo remove */
+ b->pseudo_deleted = 1;
bp = &b->next;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
if (tb->common.status & (DB_DUPLICATE_BAG)) {
continue;
@@ -1218,7 +1104,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
}
}
- else if (nitems_diff && b->hvalue != INVALID_HASH) {
+ else if (nitems_diff && !is_pseudo_deleted(b)) {
break;
}
bp = &b->next;
@@ -1226,9 +1112,10 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
@@ -1237,7 +1124,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
Sint slot;
int retval;
int nactive;
@@ -1264,831 +1151,1136 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
/*
- * This is just here so I can take care of the return value
- * that is to be sent during a trap (the BIF_TRAP macros explicitly returns)
+ * Match traversal callbacks
*/
-static BIF_RETTYPE bif_trap1(Export *bif,
- Process *p,
- Eterm p1)
+
+typedef struct match_callbacks_t_ match_callbacks_t;
+struct match_callbacks_t_
{
- BIF_TRAP1(bif, p, p1);
-}
-
+/* Called when no match is possible.
+ * ctx: Pointer to the traversal context
+ * ret: Out-parameter for the term returned by the traversal function.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ */
+ int (*on_nothing_can_match)(match_callbacks_t* ctx, Eterm* ret);
+
+/* Called for each match result.
+ * ctx: Pointer to the traversal context
+ * slot_ix: Current slot index
+ * current_ptr_ptr: Triple pointer to either the bucket or the 'next' pointer in the previous element;
+ * can be (carefully) used to adjust iteration when deleting or replacing elements.
+ * match_res: The result of running the match program against the current term.
+ *
+ * Should return 1 for successful match, 0 otherwise.
+ */
+ int (*on_match_res)(match_callbacks_t* ctx, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr, Eterm match_res);
+
+/* Called when we have either matched enough elements in this cycle or reached EOT.
+ * ctx: Pointer to the traversal context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * iterations_left: Number of intended iterations (down from an initial max.) left in this traversal cycle
+ * mpp: Double pointer to the compiled match program
+ * ret: Out-parameter for the term returned by the traversal function.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+ int (*on_loop_ended)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret);
+
+/* Called when it's time to trap
+ * ctx: Pointer to the traversal context
+ * slot_ix: Current slot index
+ * got: How many elements have been matched so far
+ * mpp: Double pointer to the compiled match program
+ * ret: Out-parameter for the term returned by the traversal function.
+ *
+ * Both the direct return value and 'ret' are used as the traversal function return values.
+ * If *mpp is set to NULL, it won't be deallocated (useful for trapping).
+ */
+ int (*on_trap)(match_callbacks_t* ctx, Sint slot_ix, Sint got, Binary** mpp,
+ Eterm* ret);
+
+};
+
+
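
These four callbacks turn the previously duplicated select loops into one
traversal engine. A hedged sketch of how an operation might plug in,
assuming the common C idiom of embedding the callback struct first in an
operation-specific context (the layout here is illustrative; the actual
contexts appear further down in this patch):

    typedef struct {
        match_callbacks_t base; /* must be first: the engine passes &base */
        Sint count;             /* operation-specific state */
    } count_traversal_ctx_t;

    static int count_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
                                  HashDbTerm*** current_ptr_ptr,
                                  Eterm match_res)
    {
        count_traversal_ctx_t* ctx = (count_traversal_ctx_t*) ctx_base;
        (void) slot_ix; (void) current_ptr_ptr;
        /* A counting traversal only reports whether the term matched;
         * the engine increments its own 'got' counter when we return 1. */
        if (is_value(match_res)) {
            ctx->count++;
            return 1;
        }
        return 0;
    }
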
/*
- * Continue collecting select matches, this may happen either due to a trap
- * or when the user calls ets:select/1
+ * Begin hash table match traversal
*/
-static int db_select_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+static int match_traverse(Process* p, DbTableHash* tb,
+ Eterm pattern,
+ extra_match_validator_t extra_match_validator, /* Optional */
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ match_callbacks_t* ctx,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Sint slot_ix;
- Sint save_slot_ix;
- Sint chunk_size;
- int all_objects;
- Binary *mp;
- int num_left = 1000;
- HashDbTerm *current = 0;
- Eterm match_list;
- Eterm *hp;
+ Sint slot_ix; /* Slot index */
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
+ struct mp_info mpi;
+ unsigned current_list_pos = 0; /* Prefound buckets list index */
Eterm match_res;
- Sint got;
- Eterm *tptr;
- erts_smp_rwmtx_t* lck;
+ Sint got = 0; /* Matched terms counter */
+ erts_rwmtx_t* lck; /* Slot lock */
+ int ret_value;
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+ Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**)
+ = (lock_for_write ? next_slot_w : next_slot);
+
+ if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi))
+ != DB_ERROR_NONE)
+ {
+ *ret = NIL;
+ goto done;
+ }
-#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
+ if (!mpi.something_can_match) {
+ /* Can't possibly match anything */
+ ret_value = ctx->on_nothing_can_match(ctx, ret);
+ goto done;
+ }
- /* Decode continuation. We know it's a tuple but not the arity or anything else */
+ /*
+ * Look for initial slot / bucket
+ */
+ if (!mpi.key_given) {
+ /* Run this code if the pattern is a variable
+ * or GETKEY(pattern) is a variable */
+ slot_ix = 0;
+ lck = lock_hash_function(tb,slot_ix);
+ for (;;) {
+ ASSERT(slot_ix < NACTIVE(tb));
+ if (*(current_ptr = &BUCKET(tb,slot_ix)) != NULL) {
+ break;
+ }
+ slot_ix = next_slot_function(tb,slot_ix,&lck);
+ if (slot_ix == 0) {
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left,
+ &mpi.mp, ret);
+ goto done;
+ }
+ }
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(*current_ptr == BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
- tptr = tuple_val(continuation);
+ /*
+ * Execute traversal cycle
+ */
+ for(;;) {
+ if (*current_ptr != NULL) {
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, mpi.mp,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else if (mpi.key_given) { /* Key is bound */
+ unlock_hash_function(lck);
+ if (current_list_pos == mpi.num_lists) {
+ ret_value = ctx->on_loop_ended(ctx, -1, got, iterations_left, &mpi.mp, ret);
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = lock_hash_function(tb, slot_ix);
+ current_ptr = mpi.lists[current_list_pos].bucket;
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ }
+ else { /* Key is variable */
+ if ((slot_ix = next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, &mpi.mp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
+ }
+ }
- if (arityval(*tptr) != 6)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
-
- if (!is_small(tptr[2]) || !is_small(tptr[3]) || !is_binary(tptr[4]) ||
- !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if ((chunk_size = signed_val(tptr[3])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
- match_list = tptr[5];
- if ((got = signed_val(tptr[6])) < 0)
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, &mpi.mp, ret);
- slot_ix = signed_val(tptr[2]);
- if (slot_ix < 0 /* EOT */
- || (chunk_size && got >= chunk_size)) {
- goto done; /* Already got all or enough in the match_list */
+done:
+ /* We should only jump directly to this label if
+ * we've already called ctx->on_nothing_can_match / on_loop_ended / on_trap
+ */
+ if (mpi.mp != NULL) {
+ erts_bin_free(mpi.mp);
}
-
- lck = RLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- RUNLOCK_HASH(lck);
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ if (mpi.lists != mpi.dlists) {
+ erts_free(ERTS_ALC_T_DB_SEL_LIST,
+ (void *) mpi.lists);
}
+ return ret_value;
- while ((current = BUCKET(tb,slot_ix)) == NULL) {
- slot_ix = next_slot(tb, slot_ix, &lck);
- if (slot_ix == 0) {
- slot_ix = -1; /* EOT */
- goto done;
- }
- }
- for(;;) {
- if (current->hvalue != INVALID_HASH &&
- (match_res = db_match_dbterm(&tb->common, p, mp, all_objects,
- &current->dbterm, &hp, 2),
- is_value(match_res))) {
-
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
+}
- --num_left;
- save_slot_ix = slot_ix;
- if ((current = next(tb, (Uint*)&slot_ix, &lck, current)) == NULL) {
- slot_ix = -1; /* EOT */
- break;
- }
- if (slot_ix != save_slot_ix) {
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- }
+/*
+ * Continue hash table match traversal
+ */
+static int match_traverse_continue(Process* p, DbTableHash* tb,
+ Sint chunk_size, /* If 0, no chunking */
+ Sint iterations_left, /* Nr. of iterations left */
+ Eterm** hpp, /* Heap */
+ Sint slot_ix, /* Slot index to resume traversal from */
+ Sint got, /* Matched terms counter */
+ Binary** mpp, /* Existing match program */
+ int lock_for_write, /* Set to 1 if we're going to delete or
+ modify existing terms */
+ match_callbacks_t* ctx,
+ Eterm* ret)
+{
+ HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
+ * the 'next' pointer in the previous term
+ */
+ HashDbTerm* saved_current; /* Helper to avoid double skip on match */
+ Eterm match_res;
+ erts_rwmtx_t* lck;
+ int ret_value;
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ = (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
+ void (*unlock_hash_function)(erts_rwmtx_t*)
+ = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
+ Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr)
+ = (lock_for_write ? next_slot_w : next_slot);
+
+ if (got < 0) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ if (slot_ix < 0 /* EOT */
+ || (chunk_size && got >= chunk_size))
+ {
+ /* Already got all or enough in the match_list */
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
+ goto done;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (got > chunk_size) { /* Cannot write destructively here,
- the list may have
- been in user space */
- rest = NIL;
- hp = HAlloc(p, (got - chunk_size) * 2);
- while (got-- > chunk_size) {
- rest = CONS(hp, CAR(list_val(match_list)), rest);
- hp += 2;
- match_list = CDR(list_val(match_list));
- ++rest_size;
- }
- }
- if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(p,3+7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix),
- tptr[3], tptr[4], rest,
- make_small(rest_size));
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else {
- if (match_list != NIL) {
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
+
+ lck = lock_hash_function(tb, slot_ix);
+ if (slot_ix >= NACTIVE(tb)) { /* Is this possible? */
+ unlock_hash_function(lck);
+ *ret = NIL;
+ ret_value = DB_ERROR_BADPARAM;
+ goto done;
}
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
+ /*
+ * Resume traversal cycle from where we left
+ */
+ current_ptr = &BUCKET(tb,slot_ix);
+ for(;;) {
+ if (*current_ptr != NULL) {
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, *mpp,
+ &(*current_ptr)->dbterm, hpp, 2);
+ saved_current = *current_ptr;
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
+ ++got;
+ }
+ --iterations_left;
+ if (*current_ptr != saved_current) {
+ /* Don't advance to next, the callback did it already */
+ continue;
+ }
+ }
+ current_ptr = &((*current_ptr)->next);
+ }
+ else {
+ if ((slot_ix=next_slot_function(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ unlock_hash_function(lck);
+ break;
+ }
+ if (iterations_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ unlock_hash_function(lck);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, mpp, ret);
+ goto done;
+ }
+ current_ptr = &BUCKET(tb,slot_ix);
+ }
+ }
- hp = HAlloc(p,7);
- continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), tptr[3],
- tptr[4], match_list, make_small(got));
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
-#undef RET_TO_BIF
+done:
+ /* We should only jump directly to this label if
+ * we've already called on_loop_ended / on_trap
+ */
+ return ret_value;
}
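
For reference, the callback interface that match_traverse() and
match_traverse_continue() dispatch through can be sketched as below. This is
a reconstruction from the call sites in this file (the actual definition
lives earlier in erl_db_hash.c); field names and signatures are taken from
the callback assignments and definitions that follow.

/* Sketch of the traversal callback table, reconstructed from usage: */
typedef struct match_callbacks_t_ match_callbacks_t;
struct match_callbacks_t_ {
    /* Nothing in the table can possibly match the pattern */
    int (*on_nothing_can_match)(match_callbacks_t* ctx, Eterm* ret);
    /* One candidate term was run through the match program;
     * return 1 iff it counted as a match */
    int (*on_match_res)(match_callbacks_t* ctx, Sint slot_ix,
                        HashDbTerm*** current_ptr_ptr, Eterm match_res);
    /* Traversal finished within its iteration budget */
    int (*on_loop_ended)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
                         Sint iterations_left, Binary** mpp, Eterm* ret);
    /* Iteration budget exhausted: build a continuation and trap */
    int (*on_trap)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
                   Binary** mpp, Eterm* ret);
};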
-static int db_select_hash(Process *p, DbTable *tbl,
- Eterm pattern, int reverse,
- Eterm *ret)
-{
- return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret);
-}
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
- Eterm pattern, Sint chunk_size,
- int reverse, /* not used */
- Eterm *ret)
+/*
+ * Common traversal trapping/continuation code;
+ * used by select_count, select_delete and select_replace,
+ * as well as their continuation-handling counterparts.
+ */
+
+static ERTS_INLINE int on_simple_trap(Export* trap_function,
+ Process* p,
+ DbTableHash* tb,
+ Eterm tid,
+ Eterm* prev_continuation_tptr,
+ Sint slot_ix,
+ Sint got,
+ Binary** mpp,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Sint slot_ix;
- HashDbTerm *current = 0;
- unsigned current_list_pos = 0;
- Eterm match_list;
- Eterm match_res;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
+ Eterm* hp;
+ Eterm egot;
Eterm mpb;
- erts_smp_rwmtx_t* lck;
+ Eterm continuation;
+ int is_first_trap = (prev_continuation_tptr == NULL);
+ size_t base_halloc_sz = (is_first_trap ? ERTS_MAGIC_REF_THING_SIZE : 0);
+ BUMP_ALL_REDS(p);
+ if (IS_USMALL(0, got)) {
+ hp = HAllocX(p, base_halloc_sz + 5, ERTS_MAGIC_REF_THING_SIZE);
+ egot = make_small(got);
+ }
+ else {
+ hp = HAllocX(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5,
+ ERTS_MAGIC_REF_THING_SIZE);
+ egot = uint_to_big(got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+ if (is_first_trap) {
+ if (is_atom(tid))
+ tid = erts_db_make_tid(p, &tb->common);
+ mpb = erts_db_make_match_prog_ref(p, *mpp, &hp);
+ *mpp = NULL; /* otherwise the caller will destroy it */
+ }
+ else {
+ ASSERT(!is_atom(tid));
+ mpb = prev_continuation_tptr[3];
+ }
+ continuation = TUPLE4(
+ hp,
+ tid,
+ make_small(slot_ix),
+ mpb,
+ egot);
+ ERTS_BIF_PREP_TRAP1(*ret, trap_function, p, continuation);
+ return DB_ERROR_NONE;
+}
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
+static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
+ Eterm** tptr_ptr,
+ Eterm* tid_ptr,
+ Sint* slot_ix_p,
+ Binary** mpp,
+ Sint* got_p)
+{
+ Eterm* tptr;
+ ASSERT(is_tuple(continuation));
+ tptr = tuple_val(continuation);
+ if (arityval(*tptr) != 4)
+ return 1;
- if (!mpi.something_can_match) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL, DB_ERROR_NONE);
- /* can't possibly match anything */
+    if (!is_small(tptr[2]) || !(is_big(tptr[4]) || is_small(tptr[4]))) {
+ return 1;
}
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- for (;;) {
- ASSERT(slot_ix < NACTIVE(tb));
- if ((current = BUCKET(tb,slot_ix)) != NULL) {
- break;
- }
- slot_ix = next_slot(tb,slot_ix,&lck);
- if (slot_ix == 0) {
- if (chunk_size) {
- RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
- }
- RET_TO_BIF(NIL,DB_ERROR_NONE);
- }
- }
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
+ *tptr_ptr = tptr;
+ *tid_ptr = tptr[1];
+ *slot_ix_p = unsigned_val(tptr[2]);
+ *mpp = erts_db_get_match_prog_binary_unchecked(tptr[3]);
+ if (is_big(tptr[4])) {
+ *got_p = big_to_uint32(tptr[4]);
+ }
+ else {
+ *got_p = unsigned_val(tptr[4]);
}
+ return 0;
+}
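
on_simple_trap() and unpack_simple_continuation() round-trip the same
4-tuple. A sketch of its layout, inferred from the TUPLE4 construction and
the checks above (1-based element indexing, as returned by tuple_val()):

/*
 * "Simple" continuation term:
 *
 *   tptr[1]  Table          tid (magic ref; never an atom after first trap)
 *   tptr[2]  SlotIx         small: hash slot to resume the traversal from
 *   tptr[3]  MatchProgRef   magic ref holding the compiled match program
 *   tptr[4]  Got            small or bignum: matches accumulated so far
 */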
- match_list = NIL;
- for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, &hp, 2);
- if (is_value(match_res)) {
- match_list = CONS(hp, match_res, match_list);
- ++got;
- }
- }
- current = current->next;
- }
- else if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- slot_ix = -1; /* EOT */
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else { /* Key is variable */
- --num_left;
+/*
+ *
+ * select / select_chunk match traversal
+ *
+ */
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- slot_ix = -1;
- break;
- }
- if (chunk_size && got >= chunk_size) {
- RUNLOCK_HASH(lck);
- break;
- }
- if (num_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
+#define MAX_SELECT_CHUNK_ITERATIONS 1000
+
+typedef struct {
+ match_callbacks_t base;
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* hp;
+ Sint chunk_size;
+ Eterm match_list;
+ Eterm* prev_continuation_tptr;
+} select_chunk_context_t;
+
+static int select_chunk_on_nothing_can_match(match_callbacks_t* ctx_base, Eterm* ret)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
+}
+
+static int select_chunk_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ if (is_value(match_res)) {
+ ctx->match_list = CONS(ctx->hp, match_res, ctx->match_list);
+ return 1;
+ }
+ return 0;
+}
+
+static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ Eterm mpb;
+
+ if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) {
+ /* We didn't get to iterate a single time, which means EOT */
+ ASSERT(ctx->match_list == NIL);
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
+ return DB_ERROR_NONE;
+ }
+ else {
+ ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
+ Eterm continuation;
+ Eterm rest = NIL;
+ Sint rest_size = 0;
+
+ if (got > ctx->chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = ctx->match_list;
+ rest = ctx->match_list;
+ while (got-- > ctx->chunk_size + 1) {
+ tmp = CDR(list_val(tmp));
+ ++rest_size;
+ }
+ ++rest_size;
+ ctx->match_list = CDR(list_val(tmp));
+ CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
+ been in 'user space' */
+ }
+ if (rest != NIL || slot_ix >= 0) { /* Need more calls */
+ Eterm tid = ctx->tid;
+ ctx->hp = HAllocX(ctx->p,
+ 3 + 7 + ERTS_MAGIC_REF_THING_SIZE,
+ ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &ctx->hp);
+ if (is_atom(tid))
+ tid = erts_db_make_tid(ctx->p,
+ &ctx->tb->common);
+ continuation = TUPLE6(
+ ctx->hp,
+ tid,
+ make_small(slot_ix),
+ make_small(ctx->chunk_size),
+ mpb, rest,
+ make_small(rest_size));
+ *mpp = NULL; /* Otherwise the caller will destroy it */
+ ctx->hp += 7;
+ *ret = TUPLE2(ctx->hp, ctx->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else { /* All data is exhausted */
+ if (ctx->match_list != NIL) { /* No more data to search but still a
+ result to return to the caller */
+ ctx->hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(ctx->hp, ctx->match_list, am_EOT);
+ return DB_ERROR_NONE;
+            } else { /* Reached the end of the table with no data to return */
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
}
+ *ret = ctx->match_list;
+ return DB_ERROR_NONE;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (chunk_size) {
- Eterm continuation;
- Eterm rest = NIL;
- Sint rest_size = 0;
-
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- if (got > chunk_size) { /* Split list in return value and 'rest' */
- Eterm tmp = match_list;
- rest = match_list;
- while (got-- > chunk_size + 1) {
- tmp = CDR(list_val(tmp));
- ++rest_size;
- }
- ++rest_size;
- match_list = CDR(list_val(tmp));
- CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
- been in 'user space' */
- }
- if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- hp = HAlloc(p,3+7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
- make_small(chunk_size),
- mpb, rest,
- make_small(rest_size));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- hp += 7;
- RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
- } else { /* All data is exhausted */
- if (match_list != NIL) { /* No more data to search but still a
- result to return to the caller */
- hp = HAlloc(p, 3);
- RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
- } else { /* Reached the end of the ttable with no data to return */
- RET_TO_BIF(am_EOT, DB_ERROR_NONE);
- }
- }
+}
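
The chunk split above cuts the freshly built match_list in place after a
computed number of cells, which is safe only because that list has never
been exposed to the caller. A minimal stand-alone analogue of the
destructive cut, using plain C pointers instead of Eterm cons cells:

#include <stddef.h>

struct cell { int car; struct cell* cdr; };

/* Cut 'list' after its first 'keep' cells (1 <= keep < length) and
 * return the detached tail. Mutates the list, so it is only safe on a
 * list no one else has seen yet. */
static struct cell* split_after(struct cell* list, int keep)
{
    struct cell* p = list;
    struct cell* rest;
    while (--keep > 0)
        p = p->cdr;
    rest = p->cdr;
    p->cdr = NULL;      /* the destructive cut, as in CDR(...) = NIL above */
    return rest;
}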
+
+static int select_chunk_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ Eterm mpb;
+ Eterm continuation;
+ Eterm* hp;
+
+ BUMP_ALL_REDS(ctx->p);
+
+ if (ctx->prev_continuation_tptr == NULL) {
+ Eterm tid = ctx->tid;
+ /* First time we're trapping */
+ hp = HAllocX(ctx->p, 7 + ERTS_MAGIC_REF_THING_SIZE,
+ ERTS_MAGIC_REF_THING_SIZE);
+ if (is_atom(tid))
+ tid = erts_db_make_tid(ctx->p, &ctx->tb->common);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &hp);
+ continuation = TUPLE6(
+ hp,
+ tid,
+ make_small(slot_ix),
+ make_small(ctx->chunk_size),
+ mpb,
+ ctx->match_list,
+ make_small(got));
+ *mpp = NULL; /* otherwise the caller will destroy it */
}
- RET_TO_BIF(match_list,DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- hp = HAlloc(p,7+PROC_BIN_SIZE);
- mpb =db_make_mp_binary(p,(mpi.mp),&hp);
- continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
- make_small(chunk_size),
- mpb, match_list,
- make_small(got));
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ else {
+ /* Not the first time we're trapping; reuse continuation terms */
+ hp = HAlloc(ctx->p, 7);
+ continuation = TUPLE6(
+ hp,
+ ctx->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
+ ctx->match_list,
+ make_small(got));
+ }
+ ERTS_BIF_PREP_TRAP1(*ret, &ets_select_continue_exp, ctx->p,
+ continuation);
+ return DB_ERROR_NONE;
+}
-#undef RET_TO_BIF
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern,
+ int reverse, Eterm *ret)
+{
+ return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
+}
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret)
+{
+ select_chunk_context_t ctx;
+
+ ctx.base.on_nothing_can_match = select_chunk_on_nothing_can_match;
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_on_loop_ended;
+    ctx.base.on_trap = select_chunk_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = NIL;
+ ctx.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ ctx.p, ctx.tb,
+ pattern, NULL,
+ ctx.chunk_size,
+ MAX_SELECT_CHUNK_ITERATIONS,
+ &ctx.hp, 0,
+ &ctx.base, ret);
}
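
Every operation in this family follows the same shape: fill in a context
whose first member is the callback table, then hand it to match_traverse().
A hypothetical new operation would look like the sketch below; all my_op_*
names are invented placeholders, not existing code.

/* Hypothetical callbacks, assumed to be written like the ones above: */
static int my_op_on_nothing_can_match(match_callbacks_t*, Eterm*);
static int my_op_on_match_res(match_callbacks_t*, Sint, HashDbTerm***, Eterm);
static int my_op_on_loop_ended(match_callbacks_t*, Sint, Sint, Sint,
                               Binary**, Eterm*);
static int my_op_on_trap(match_callbacks_t*, Sint, Sint, Binary**, Eterm*);

typedef struct {
    match_callbacks_t base;   /* must be first: callbacks cast back to it */
    Process* p;
    DbTableHash* tb;
    Eterm tid;
    Eterm* prev_continuation_tptr;
} my_op_context_t;

static int db_select_myop_hash(Process* p, DbTable* tbl, Eterm tid,
                               Eterm pattern, Eterm* ret)
{
    my_op_context_t ctx;
    ctx.base.on_nothing_can_match = my_op_on_nothing_can_match;
    ctx.base.on_match_res = my_op_on_match_res;
    ctx.base.on_loop_ended = my_op_on_loop_ended;
    ctx.base.on_trap = my_op_on_trap;
    ctx.p = p;
    ctx.tb = &tbl->hash;
    ctx.tid = tid;
    ctx.prev_continuation_tptr = NULL;
    return match_traverse(ctx.p, ctx.tb, pattern, NULL,
                          0,    /* no chunking */
                          1000, /* iteration budget */
                          NULL, /* no heap needed */
                          0,    /* read lock */
                          &ctx.base, ret);
}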
-static int db_select_count_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
+/*
+ *
+ * select_continue match traversal
+ *
+ */
+
+static
+int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm* current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm continuation;
- int errcode;
- Eterm egot;
- Eterm mpb;
- erts_smp_rwmtx_t* lck;
+ Eterm rest = NIL;
+ Eterm* hp;
+
+ ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
+ Sint rest_size = 0;
+ if (got > ctx->chunk_size) {
+            /* Cannot write destructively here;
+               the list may have been in user space */
+ hp = HAlloc(ctx->p, (got - ctx->chunk_size) * 2);
+ while (got-- > ctx->chunk_size) {
+ rest = CONS(hp, CAR(list_val(ctx->match_list)), rest);
+ hp += 2;
+ ctx->match_list = CDR(list_val(ctx->match_list));
+ ++rest_size;
+ }
+ }
+ if (rest != NIL || slot_ix >= 0) {
+ hp = HAlloc(ctx->p, 3 + 7);
+ continuation = TUPLE6(
+ hp,
+ ctx->prev_continuation_tptr[1],
+ make_small(slot_ix),
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
+ rest,
+ make_small(rest_size));
+ hp += 7;
+ *ret = TUPLE2(hp, ctx->match_list, continuation);
+ return DB_ERROR_NONE;
+ } else {
+ if (ctx->match_list != NIL) {
+ hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(hp, ctx->match_list, am_EOT);
+ return DB_ERROR_NONE;
+ } else {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ }
+ }
+ *ret = ctx->match_list;
+ return DB_ERROR_NONE;
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+/*
+ * This is called when select traps
+ */
+static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
+ Eterm* ret)
+{
+ select_chunk_context_t ctx;
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size;
+ Eterm match_list;
+ Sint iterations_left = MAX_SELECT_CHUNK_ITERATIONS;
+ /* Decode continuation. We know it's a tuple but not the arity or anything else */
+ ASSERT(is_tuple(continuation));
+ tptr = tuple_val(continuation);
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
+ if (arityval(*tptr) != 6)
+ goto badparam;
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
- }
+ if (!is_small(tptr[2]) || !is_small(tptr[3]) ||
+ !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
+ goto badparam;
+ if ((chunk_size = signed_val(tptr[3])) < 0)
+ goto badparam;
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- slot_ix = 0;
- lck = RLOCK_HASH(tb,slot_ix);
- current = BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(current == BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (mp == NULL)
+ goto badparam;
- for(;;) {
- if (current != NULL) {
- if (current->hvalue != INVALID_HASH) {
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &current->dbterm, NULL,0) == am_true) {
- ++got;
- }
- --num_left;
- }
- current = current->next;
- }
- else { /* next bucket */
- if (mpi.key_given) { /* Key is bound */
- RUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = RLOCK_HASH(tb, slot_ix);
- current = *(mpi.lists[current_list_pos].bucket);
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- }
- else {
- if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
- egot = make_small(got);
- }
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ if ((got = signed_val(tptr[6])) < 0)
+ goto badparam;
+
+ tid = tptr[1];
+ slot_ix = signed_val(tptr[2]);
+ match_list = tptr[5];
-#undef RET_TO_BIF
+ /* Proceed */
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_continue_on_loop_ended;
+ ctx.base.on_trap = select_chunk_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = match_list;
+ ctx.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ ctx.p, ctx.tb, ctx.chunk_size,
+ iterations_left, &ctx.hp, slot_ix, got, &mp, 0,
+ &ctx.base, ret);
+
+badparam:
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
}
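
For contrast with the 4-tuple continuation used by select_count,
select_delete and select_replace, select/select_chunk trap with a 6-tuple.
Its layout, as implied by the TUPLE6 constructions and the validation above:

/*
 * select/select_chunk continuation term:
 *
 *   tptr[1]  Table          tid
 *   tptr[2]  SlotIx         small: hash slot to resume from
 *   tptr[3]  ChunkSize      small: 0 means "no chunking"
 *   tptr[4]  MatchProgRef   magic ref holding the compiled match program
 *   tptr[5]  MatchList      matches accumulated so far (list or NIL)
 *   tptr[6]  Got            small: number of matches in MatchList
 */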
-static int db_select_delete_hash(Process *p,
- DbTable *tbl,
- Eterm pattern,
- Eterm *ret)
-{
- DbTableHash *tb = &tbl->hash;
- struct mp_info mpi;
- Uint slot_ix = 0;
- HashDbTerm **current = NULL;
- unsigned current_list_pos = 0;
- Eterm *hp;
- int num_left = 1000;
- Uint got = 0;
- Eterm continuation;
- int errcode;
- Uint last_pseudo_delete = (Uint)-1;
- Eterm mpb;
- Eterm egot;
-#ifdef ERTS_SMP
- erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
-#else
- erts_aint_t fixated_by_me = 0;
-#endif
- erts_smp_rwmtx_t* lck;
+#undef MAX_SELECT_CHUNK_ITERATIONS
-#define RET_TO_BIF(Term,RetVal) do { \
- if (mpi.mp != NULL) { \
- erts_bin_free(mpi.mp); \
- } \
- if (mpi.lists != mpi.dlists) { \
- erts_free(ERTS_ALC_T_DB_SEL_LIST, \
- (void *) mpi.lists); \
- } \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+/*
+ *
+ * select_count match traversal
+ *
+ */
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
- RET_TO_BIF(NIL,errcode);
- }
+#define MAX_SELECT_COUNT_ITERATIONS 1000
- if (!mpi.something_can_match) {
- RET_TO_BIF(make_small(0), DB_ERROR_NONE);
- /* can't possibly match anything */
- }
+typedef struct {
+ match_callbacks_t base;
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* prev_continuation_tptr;
+} select_count_context_t;
- if (!mpi.key_given) {
- /* Run this code if pattern is variable or GETKEY(pattern) */
- /* is a variable */
- lck = WLOCK_HASH(tb,slot_ix);
- current = &BUCKET(tb,slot_ix);
- } else {
- /* We have at least one */
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos++].bucket;
- ASSERT(*current == BUCKET(tb,slot_ix));
- }
+static int select_count_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+static int select_count_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ return (match_res == am_true);
+}
- for(;;) {
- if ((*current) == NULL) {
- if (mpi.key_given) { /* Key is bound */
- WUNLOCK_HASH(lck);
- if (current_list_pos == mpi.num_lists) {
- goto done;
- } else {
- slot_ix = mpi.lists[current_list_pos].ix;
- lck = WLOCK_HASH(tb, slot_ix);
- current = mpi.lists[current_list_pos].bucket;
- ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
- ++current_list_pos;
- }
- } else {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mpi.mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- HashDbTerm *del;
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- if (!add_fixed_deletion(tb, slot_ix, fixated_by_me))
- goto do_erase;
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- do_erase:
- del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- if (got) {
- try_shrink(tb);
- }
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, PROC_BIN_SIZE + 5);
- egot = make_small(got);
- }
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+static int select_count_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
+{
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
+ ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
+ BUMP_REDS(ctx->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
+ *ret = erts_make_integer(got, ctx->p);
+ return DB_ERROR_NONE;
+}
+
+static int select_count_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
+ return on_simple_trap(
+ &ets_select_count_continue_exp,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_count_context_t ctx;
+ Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
+ Sint chunk_size = 0;
+
+ ctx.base.on_nothing_can_match = select_count_on_nothing_can_match;
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ ctx.p, ctx.tb,
+ pattern, NULL,
+ chunk_size, iterations_left, NULL, 0,
+ &ctx.base, ret);
+}
+
+/*
+ * This is called when select_count traps
+ */
+static int db_select_count_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
+{
+ select_count_context_t ctx;
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
+
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
}
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- mpb,
- egot);
- mpi.mp = NULL; /*otherwise the return macro will destroy it */
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
-#undef RET_TO_BIF
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
+ return match_traverse_continue(
+ ctx.p, ctx.tb, chunk_size,
+ MAX_SELECT_COUNT_ITERATIONS,
+ NULL, slot_ix, got, &mp, 0,
+ &ctx.base, ret);
}
+
+#undef MAX_SELECT_COUNT_ITERATIONS
+
+
/*
-** This is called when select_delete traps
-*/
-static int db_select_delete_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ *
+ * select_delete match traversal
+ *
+ */
+
+#define MAX_SELECT_DELETE_ITERATIONS 1000
+
+typedef struct {
+ match_callbacks_t base;
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* prev_continuation_tptr;
+ erts_aint_t fixated_by_me;
+ Uint last_pseudo_delete;
+ HashDbTerm* free_us;
+} select_delete_context_t;
+
+static int select_delete_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- Uint last_pseudo_delete = (Uint)-1;
- HashDbTerm **current = NULL;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- int fixated_by_me = ONLY_WRITER(p,tb) ? 0 : 1; /* ToDo: something nicer */
- erts_smp_rwmtx_t* lck;
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ HashDbTerm** current_ptr = *current_ptr_ptr;
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ HashDbTerm* del;
+ if (match_res != am_true)
+ return 0;
-
- tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
+ if (NFIXED(ctx->tb) > ctx->fixated_by_me) { /* fixated by others? */
+ if (slot_ix != ctx->last_pseudo_delete) {
+ if (!add_fixed_deletion(ctx->tb, slot_ix, ctx->fixated_by_me))
+ goto do_erase;
+ ctx->last_pseudo_delete = slot_ix;
+ }
+ (*current_ptr)->pseudo_deleted = 1;
}
-
- lck = WLOCK_HASH(tb,slot_ix);
- if (slot_ix >= NACTIVE(tb)) {
- WUNLOCK_HASH(lck);
- goto done;
- }
- current = &BUCKET(tb,slot_ix);
-
- for(;;) {
- if ((*current) == NULL) {
- if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- WUNLOCK_HASH(lck);
- goto trap;
- }
- current = &BUCKET(tb,slot_ix);
- }
- else if ((*current)->hvalue == INVALID_HASH) {
- current = &((*current)->next);
- }
- else {
- int did_erase = 0;
- if (db_match_dbterm(&tb->common, p, mp, 0,
- &(*current)->dbterm, NULL, 0) == am_true) {
- HashDbTerm *del;
- if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
- if (slot_ix != last_pseudo_delete) {
- if (!add_fixed_deletion(tb, slot_ix, fixated_by_me))
- goto do_erase;
- last_pseudo_delete = slot_ix;
- }
- (*current)->hvalue = INVALID_HASH;
- } else {
- do_erase:
- del = *current;
- *current = (*current)->next;
- free_term(tb, del);
- did_erase = 1;
- }
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- ++got;
- }
-
- --num_left;
- if (!did_erase) {
- current = &((*current)->next);
- }
- }
+ else {
+ do_erase:
+ del = *current_ptr;
+        *current_ptr = (*current_ptr)->next; /* unlink the term from the chain */
+ del->next = ctx->free_us;
+ ctx->free_us = del;
}
-done:
- BUMP_REDS(p, 1000 - num_left);
+ erts_atomic_dec_nob(&ctx->tb->common.nitems);
+
+ return 1;
+}
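
The branch above picks between physically unlinking a matched term and
merely marking it: while other processes have the table fixed, a live
iterator may be parked on the term, so only the pseudo_deleted flag is set
(plus a FixedDeletion record so it can be reaped later). The two strategies
on a plain singly linked list, illustrative only:

#include <stddef.h>

struct node { struct node* next; int pseudo_deleted; };

/* 'prevp' points at the link leading to the doomed node, exactly like
 * current_ptr above. Returns the node to free, or NULL if only marked. */
static struct node* delete_node(struct node** prevp, int others_may_iterate)
{
    struct node* doomed = *prevp;
    if (others_may_iterate) {
        doomed->pseudo_deleted = 1;  /* keep the chain intact for iterators */
        return NULL;
    }
    *prevp = doomed->next;           /* physically unlink */
    return doomed;                   /* caller frees it when safe */
}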
+
+static int select_delete_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
+{
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
+ ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
+ BUMP_REDS(ctx->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
if (got) {
- try_shrink(tb);
- }
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
- }
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+ try_shrink(ctx->tb);
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ *ret = erts_make_integer(got, ctx->p);
+ return DB_ERROR_NONE;
+}
-#undef RET_TO_BIF
+static int select_delete_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
+ return on_simple_trap(
+ &ets_select_delete_continue_exp,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
+}
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_delete_context_t ctx;
+ Sint chunk_size = 0;
+
+ ctx.base.on_nothing_can_match = select_delete_on_nothing_can_match;
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
+ ctx.fixated_by_me = ctx.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
+
+ return match_traverse(
+ ctx.p, ctx.tb,
+ pattern, NULL,
+ chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
+ &ctx.base, ret);
}
-
+
/*
-** This is called when select_count traps
-*/
-static int db_select_count_continue_hash(Process *p,
- DbTable *tbl,
- Eterm continuation,
- Eterm *ret)
+ * This is called when select_delete traps
+ */
+static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
{
- DbTableHash *tb = &tbl->hash;
- Uint slot_ix;
- HashDbTerm* current;
- Eterm *hp;
- int num_left = 1000;
- Uint got;
- Eterm *tptr;
- Binary *mp;
- Eterm egot;
- erts_smp_rwmtx_t* lck;
+ select_delete_context_t ctx;
+ Eterm* tptr;
+ Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
+ ctx.fixated_by_me = ONLY_WRITER(p, ctx.tb) ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
+
+ return match_traverse_continue(
+ ctx.p, ctx.tb, chunk_size,
+ MAX_SELECT_DELETE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ &ctx.base, ret);
+}
-#define RET_TO_BIF(Term,RetVal) do { \
- *ret = (Term); \
- return RetVal; \
- } while(0)
+#undef MAX_SELECT_DELETE_ITERATIONS
-
- tptr = tuple_val(continuation);
- slot_ix = unsigned_val(tptr[2]);
- mp = ((ProcBin *) binary_val(tptr[3]))->val;
- if (is_big(tptr[4])) {
- got = big_to_uint32(tptr[4]);
- } else {
- got = unsigned_val(tptr[4]);
- }
-
- lck = RLOCK_HASH(tb, slot_ix);
- if (slot_ix >= NACTIVE(tb)) { /* Is this posible? */
- RUNLOCK_HASH(lck);
- goto done;
- }
- current = BUCKET(tb,slot_ix);
-
- for(;;) {
- if (current != NULL) {
- if (current->hvalue == INVALID_HASH) {
- current = current->next;
- continue;
- }
- if (db_match_dbterm(&tb->common, p, mp, 0, &current->dbterm,
- NULL, 0) == am_true) {
- ++got;
- }
- --num_left;
- current = current->next;
- }
- else { /* next bucket */
- if ((slot_ix = next_slot(tb,slot_ix,&lck)) == 0) {
- goto done;
- }
- if (num_left <= 0) {
- RUNLOCK_HASH(lck);
- goto trap;
- }
- current = BUCKET(tb,slot_ix);
- }
- }
-done:
- BUMP_REDS(p, 1000 - num_left);
- RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
-trap:
- BUMP_ALL_REDS(p);
- if (IS_USMALL(0, got)) {
- hp = HAlloc(p, 5);
- egot = make_small(got);
- }
- else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
- egot = uint_to_big(got, hp);
- hp += BIG_UINT_HEAP_SIZE;
+/*
+ *
+ * select_replace match traversal
+ *
+ */
+
+#define MAX_SELECT_REPLACE_ITERATIONS 1000
+
+typedef struct {
+ match_callbacks_t base;
+ Process* p;
+ DbTableHash* tb;
+ Eterm tid;
+ Eterm* prev_continuation_tptr;
+} select_replace_context_t;
+
+static int select_replace_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
+ *ret = make_small(0);
+ return DB_ERROR_NONE;
+}
+
+static int select_replace_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
+{
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ DbTableHash* tb = ctx->tb;
+ HashDbTerm* new;
+ HashDbTerm* next;
+ HashValue hval;
+
+ if (is_value(match_res)) {
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, match_res);
+ ASSERT(is_value(key));
+ ASSERT(eq(key, GETKEY(tb, (**current_ptr_ptr)->dbterm.tpl)));
+#endif
+ next = (**current_ptr_ptr)->next;
+ hval = (**current_ptr_ptr)->hvalue;
+ new = new_dbterm(tb, match_res);
+ new->next = next;
+ new->hvalue = hval;
+ new->pseudo_deleted = 0;
+ free_term(tb, **current_ptr_ptr);
+ **current_ptr_ptr = new; /* replace 'next' pointer in previous object */
+ *current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */
+ return 1;
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
- tptr[3],
- egot);
- RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
- continuation),
- DB_ERROR_NONE);
+ return 0;
+}
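
select_replace swaps a freshly built term into the chain with a single
pointer update: the new node inherits the old node's next pointer and hash
value, and only the previous link is redirected. The same move on a plain
list, for illustration (allocation failure handling elided):

#include <stdlib.h>

struct node { struct node* next; int value; };

/* Replace the node '*linkp' points at with a new one carrying 'value';
 * returns the old node for the caller to free. */
static struct node* replace_node(struct node** linkp, int value)
{
    struct node* old = *linkp;
    struct node* neo = malloc(sizeof(*neo));
    neo->value = value;
    neo->next = old->next;   /* inherit the position in the chain */
    *linkp = neo;            /* the single pointer update, as above */
    return old;
}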
-#undef RET_TO_BIF
+static int select_replace_on_loop_ended(match_callbacks_t* ctx_base, Sint slot_ix,
+ Sint got, Sint iterations_left,
+ Binary** mpp, Eterm* ret)
+{
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
+ /* the more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(ctx->p,
+ MIN(MAX_SELECT_REPLACE_ITERATIONS * 2,
+ (MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got));
+ *ret = erts_make_integer(got, ctx->p);
+ return DB_ERROR_NONE;
+}
+static int select_replace_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
+{
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ return on_simple_trap(
+ &ets_select_replace_continue_exp,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
+ slot_ix, got, mpp, ret);
}
-
+
+static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
+{
+ select_replace_context_t ctx;
+ Sint chunk_size = 0;
+
+    /* The bag implementation presented both semantic consistency and
+     * performance issues; it is unsupported for now.
+     */
+ ASSERT(!(tbl->hash.common.status & DB_BAG));
+
+ ctx.base.on_nothing_can_match = select_replace_on_nothing_can_match;
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
+
+ return match_traverse(
+ ctx.p, ctx.tb,
+ pattern, db_match_keeps_key,
+ chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
+ &ctx.base, ret);
+}
+
+/*
+ * This is called when select_replace traps
+ */
+static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
+{
+ select_replace_context_t ctx;
+ Eterm* tptr;
+    Eterm tid;
+ Binary* mp;
+ Sint got;
+ Sint slot_ix;
+ Sint chunk_size = 0;
+ *ret = NIL;
+
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ *ret = NIL;
+ return DB_ERROR_BADPARAM;
+ }
+
+ /* Proceed */
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
+
+ return match_traverse_continue(
+ ctx.p, ctx.tb, chunk_size,
+ MAX_SELECT_REPLACE_ITERATIONS,
+ NULL, slot_ix, got, &mp, 1,
+ &ctx.base, ret);
+}
+
+
static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp, *b;
+ HashDbTerm *free_us = NULL;
HashValue hval = MAKE_HASH(key);
- erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+ erts_rwmtx_t *lck = WLOCK_HASH(tb, hval);
int ix = hash_to_ix(tb, hval);
int nitems_diff = 0;
@@ -2104,12 +2296,13 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
bp = &b->next;
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
}
break;
@@ -2117,12 +2310,14 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
return DB_ERROR_NONE;
}
+
/*
** Other interface routines (not directly coupled to one bif)
*/
@@ -2132,30 +2327,56 @@ void db_initialize_hash(void)
}
-int db_mark_all_deleted_hash(DbTable *tbl)
+static SWord db_mark_all_deleted_hash(DbTable *tbl, SWord reds)
{
+ const int LOOPS_PER_REDUCTION = 8;
DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
+ FixedDeletion* fixdel;
+ SWord loops = reds * LOOPS_PER_REDUCTION;
int i;
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
- for (i = 0; i < NACTIVE(tb); i++) {
- if ((list = BUCKET(tb,i)) != NULL) {
- add_fixed_deletion(tb, i, 0);
- do {
- list->hvalue = INVALID_HASH;
- list = list->next;
- }while(list != NULL);
- }
+ fixdel = (FixedDeletion*) erts_atomic_read_nob(&tb->fixdel);
+ if (fixdel && fixdel->trap) {
+ /* Continue after trap */
+ ASSERT(fixdel->all);
+ ASSERT(fixdel->slot < NACTIVE(tb));
+ i = fixdel->slot;
}
- erts_smp_atomic_set_nob(&tb->common.nitems, 0);
- return DB_ERROR_NONE;
+ else {
+ /* First call */
+ int ok;
+ fixdel = alloc_fixdel(tb);
+ ok = link_fixdel(tb, fixdel, 0);
+ ASSERT(ok); (void)ok;
+ i = 0;
+ }
+
+ do {
+ HashDbTerm* b;
+ for (b = BUCKET(tb,i); b; b = b->next)
+ b->pseudo_deleted = 1;
+ } while (++i < NACTIVE(tb) && --loops > 0);
+
+ if (i < NACTIVE(tb)) {
+ /* Yield */
+ fixdel->slot = i;
+ fixdel->all = 0;
+ fixdel->trap = 1;
+ return -1;
+ }
+
+ fixdel->slot = NACTIVE(tb) - 1;
+ fixdel->all = 1;
+ fixdel->trap = 0;
+ erts_atomic_set_nob(&tb->common.nitems, 0);
+ return loops < 0 ? 0 : loops / LOOPS_PER_REDUCTION;
}
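
db_mark_all_deleted_hash() now runs against a reduction budget: the reds
argument is scaled to a loop budget, progress is parked in the
FixedDeletion record when the budget runs out, and -1 signals a yield. The
same pattern in miniature, with hypothetical names:

#define LOOPS_PER_RED 8

typedef struct { int slot; } progress_t;  /* slot == 0 on the first call */

/* Returns -1 to request rescheduling, otherwise the number of
 * reductions left over (scaled back down). */
static long run_budgeted(progress_t* prog, int nslots, long reds,
                         void (*work)(int slot))
{
    long loops = reds * LOOPS_PER_RED;
    int i = prog->slot;               /* resume where we yielded */
    do {
        work(i);
    } while (++i < nslots && --loops > 0);
    if (i < nslots) {
        prog->slot = i;               /* remember how far we got */
        return -1;                    /* yield */
    }
    return loops < 0 ? 0 : loops / LOOPS_PER_RED;
}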
/* Display hash table contents (for dump) */
-static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
+static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
DbHashStats stats;
@@ -2163,7 +2384,6 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-#ifdef ERTS_SMP
i = tbl->common.is_thread_safe;
/* If crash dumping we set table to thread safe in order to
avoid taking any locks */
@@ -2173,9 +2393,6 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
db_calc_stats_hash(&tbl->hash, &stats);
tbl->common.is_thread_safe = i;
-#else
- db_calc_stats_hash(&tbl->hash, &stats);
-#endif
erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
@@ -2197,7 +2414,7 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
continue;
erts_print(to, to_arg, "%d: [", i);
while(list != 0) {
- if (list->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(list))
erts_print(to, to_arg, "*");
if (tb->common.compress) {
Eterm key = GETKEY(tb, list->dbterm.tpl);
@@ -2216,50 +2433,42 @@ static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
}
}
-/* release all memory occupied by a single table */
-static int db_free_table_hash(DbTable *tbl)
+static int db_free_empty_table_hash(DbTable *tbl)
{
- while (!db_free_table_continue_hash(tbl))
+ ASSERT(NITEMS(tbl) == 0);
+ while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
}
-static int db_free_table_continue_hash(DbTable *tbl)
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
{
DbTableHash *tb = &tbl->hash;
- int done;
- FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel);
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
- done = 0;
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
fixdel = fx->next;
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- (void *) fx,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- if (++done >= 2*DELETE_RECORD_LIMIT) {
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
- return 0; /* Not done */
+ free_fixdel(tb, fx);
+ if (--reds < 0) {
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
+ return reds; /* Not done */
}
}
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
- done /= 2;
while(tb->nslots != 0) {
- free_seg(tb, 1);
+ reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
- return 0; /* Not done */
+ if (reds < 0) {
+ return reds; /* Not done */
}
}
-#ifdef ERTS_SMP
if (tb->locks != NULL) {
int i;
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
@@ -2269,9 +2478,8 @@ static int db_free_table_continue_hash(DbTable *tbl)
(void*)tb->locks, sizeof(DbTableHashFineLocks));
tb->locks = NULL;
}
-#endif
- ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
- return 1; /* Done */
+ ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
+ return reds; /* Done */
}
@@ -2284,7 +2492,8 @@ static int db_free_table_continue_hash(DbTable *tbl)
** slots should be searched. Also compiles the match program
*/
static int analyze_pattern(DbTableHash *tb, Eterm pattern,
- struct mp_info *mpi)
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm *ptpl;
Eterm lst, tpl, ttpl;
@@ -2300,7 +2509,6 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
mpi->num_lists = 0;
mpi->key_given = 1;
mpi->something_can_match = 0;
- mpi->all_objects = 1;
mpi->mp = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
@@ -2322,7 +2530,10 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2337,12 +2548,19 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
+ if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
- mpi->all_objects = 0;
}
++i;
if (!(mpi->key_given)) {
@@ -2357,7 +2575,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
if (!db_has_variable(key)) { /* Bound key */
int ix, search_slot;
HashDbTerm** bp;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
@@ -2414,69 +2632,58 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
- struct segment** old_segtab)
+static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix)
{
- int nsegs;
- struct ext_segment* eseg;
+ struct segment** old_segtab = SEGTAB(tb);
+ int nsegs = 0;
+ struct ext_segtab* est;
+ ASSERT(seg_ix >= NSEG_1);
switch (seg_ix) {
- case 0: nsegs = NSEG_1; break;
- case 1: nsegs = NSEG_2; break;
- default: nsegs = seg_ix + NSEG_INC; break;
- }
- eseg = (struct ext_segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- SIZEOF_EXTSEG(nsegs));
- ASSERT(eseg != NULL);
- sys_memset(&eseg->s, 0, sizeof(struct segment));
- IF_DEBUG(eseg->s.is_ext_segment = 1);
- eseg->prev_segtab = old_segtab;
- eseg->nsegs = nsegs;
- if (old_segtab) {
- ASSERT(nsegs > tb->nsegs);
- sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
- }
+ case NSEG_1: nsegs = NSEG_2; break;
+ default: nsegs = seg_ix + NSEG_INC; break;
+ }
+ ASSERT(nsegs > tb->nsegs);
+ est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_EXT_SEGTAB(nsegs));
+ est->nsegs = nsegs;
+ est->prev_segtab = old_segtab;
+ est->prev_nsegs = tb->nsegs;
+ sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
#ifdef DEBUG
- sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
+ sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
#endif
- eseg->segtab[seg_ix] = &eseg->s;
- return eseg;
+ return est;
}
/* Extend table with one new segment
*/
-static int alloc_seg(DbTableHash *tb)
+static void alloc_seg(DbTableHash *tb)
{
- int seg_ix = tb->nslots >> SEGSZ_EXP;
-
- if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */
- struct segment** segtab = SEGTAB(tb);
- struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab);
- if (seg == NULL) return 0;
- segtab[seg_ix] = &seg->s;
- /* We don't use the new segtab until next call (see "shrink race") */
- }
- else { /* Just a new plain segment */
- struct segment** segtab;
- if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */
- struct ext_segment* eseg;
- eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
- MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- SET_SEGTAB(tb, eseg->segtab);
- tb->nsegs = eseg->nsegs;
- }
- ASSERT(seg_ix < tb->nsegs);
- segtab = SEGTAB(tb);
- ASSERT(segtab[seg_ix] == NULL);
- segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
- (DbTable *) tb,
- sizeof(struct segment));
- if (segtab[seg_ix] == NULL) return 0;
- sys_memset(segtab[seg_ix], 0, sizeof(struct segment));
- }
- tb->nslots += SEGSZ;
- return 1;
+ int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots);
+ struct segment** segtab;
+
+ ASSERT(seg_ix > 0);
+ if (seg_ix == tb->nsegs) { /* New segtab needed */
+ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix);
+ SET_SEGTAB(tb, est->segtab);
+ tb->nsegs = est->nsegs;
+ }
+ ASSERT(seg_ix < tb->nsegs);
+ segtab = SEGTAB(tb);
+ segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_SEGMENT(EXT_SEGSZ));
+ sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ));
+ tb->nslots += EXT_SEGSZ;
+}
+
+static void dealloc_ext_segtab(void* lop_data)
+{
+ struct ext_segtab* est = (struct ext_segtab*) lop_data;
+
+ erts_free(ERTS_ALC_T_DB_SEG, est);
}
/* Shrink table by freeing the top segment
@@ -2484,20 +2691,20 @@ static int alloc_seg(DbTableHash *tb)
*/
static int free_seg(DbTableHash *tb, int free_records)
{
- const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1;
+ const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1;
struct segment** const segtab = SEGTAB(tb);
- struct ext_segment* const top = (struct ext_segment*) segtab[seg_ix];
- int bytes;
+ struct segment* const segp = segtab[seg_ix];
+ Uint seg_sz;
int nrecords = 0;
- ASSERT(top != NULL);
+ ASSERT(segp != NULL);
#ifndef DEBUG
if (free_records)
#endif
{
- int i;
- for (i=0; i<SEGSZ; ++i) {
- HashDbTerm* p = top->s.buckets[i];
+ int i = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ while (i--) {
+ HashDbTerm* p = segp->buckets[i];
while(p != 0) {
HashDbTerm* nxt = p->next;
ASSERT(free_records); /* segment not empty as assumed? */
@@ -2507,55 +2714,44 @@ static int free_seg(DbTableHash *tb, int free_records)
}
}
}
-
- /* The "shrink race":
- * We must avoid deallocating an extended segment while its segtab may
- * still be used by other threads.
- * The trick is to stop use a segtab one call earlier. That is, stop use
- * a segtab when the segment above it is deallocated. When the segtab is
- * later deallocated, it has not been used for a very long time.
- * It is even theoretically safe as we have by then rehashed the entire
- * segment, seizing *all* locks, so there cannot exist any retarded threads
- * still hanging in BUCKET macro with an old segtab pointer.
- * For this to work, we must of course allocate a new segtab one call
- * earlier in alloc_seg() as well. And this is also the reason why
- * the minimum size of the first segtab is 2 and not 1 (NSEG_1).
- */
- if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */
- MY_ASSERT(top->s.is_ext_segment);
- ASSERT(segtab != top->segtab || seg_ix==0);
- bytes = SIZEOF_EXTSEG(top->nsegs);
- }
- else { /* Dealloc plain segment */
- struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1];
- MY_ASSERT(!top->s.is_ext_segment);
-
- if (segtab == newtop->segtab) { /* New top segment is extended */
- MY_ASSERT(newtop->s.is_ext_segment);
- if (newtop->prev_segtab != NULL) {
- /* Time to use a smaller segtab */
- SET_SEGTAB(tb, newtop->prev_segtab);
- tb->nsegs = seg_ix;
- ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
- }
- else {
- ASSERT(NSEG_1 > 2 && seg_ix==1);
- }
- }
- bytes = sizeof(struct segment);
+ if (seg_ix >= NSEG_1) {
+ struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab);
+
+ if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */
+ ASSERT(est->prev_segtab != NULL);
+ SET_SEGTAB(tb, est->prev_segtab);
+ tb->nsegs = est->prev_nsegs;
+
+ if (!tb->common.is_thread_safe) {
+ /*
+ * Table is doing a graceful shrink operation and we must avoid
+ * deallocating this segtab while it may still be read by other
+ * threads. Schedule deallocation with thread progress to make
+ * sure no lingering threads are still hanging in BUCKET macro
+ * with an old segtab pointer.
+ */
+ Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs);
+ ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0);
+ erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab,
+ est,
+ &est->lop,
+ sz);
+ }
+ else
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
+ SIZEOF_EXT_SEGTAB(est->nsegs));
+ }
}
+ seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ;
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz));
- erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
- (void*)top, bytes);
#ifdef DEBUG
- if (seg_ix > 0) {
- segtab[seg_ix] = NULL;
- } else {
- SET_SEGTAB(tb, NULL);
- }
+ if (seg_ix < tb->nsegs)
+ SEGTAB(tb)[seg_ix] = NULL;
#endif
- tb->nslots -= SEGSZ;
+ tb->nslots -= seg_sz;
ASSERT(tb->nslots >= 0);
return nrecords;
}
@@ -2578,7 +2774,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
if (!sz) {
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(ptr))
sz += ptr->dbterm.size + 2;
ptr = ptr->next;
}
@@ -2589,7 +2785,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(ptr)) {
copy = db_copy_object_from_ets(&tb->common, &ptr->dbterm, &hp, &MSO(p));
list = CONS(hp, copy, list);
hp += 2;
@@ -2605,103 +2801,106 @@ static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
- else {
- if (erts_smp_atomic_read_nob(&tb->is_resizing))
- return 0;
- erts_smp_atomic_set_nob(&tb->is_resizing, 1);
- return 1;
- }
+ return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
- else
- erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+ erts_atomic_set_relb(&tb->is_resizing, 0);
}
-/* Grow table with one new bucket.
+/* Grow table with one or more new buckets.
** Allocate new segment if needed.
*/
-static void grow(DbTableHash* tb, int nactive)
+static void grow(DbTableHash* tb, int nitems)
{
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
- erts_smp_rwmtx_t* lck;
- int from_ix;
+ erts_rwmtx_t* lck;
+ int nactive;
+ int from_ix, to_ix;
int szm;
+ int loop_limit = 5;
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) != nactive) {
- goto abort; /* already done (race) */
- }
-
- /* Ensure that the slot nactive exists */
- if (nactive == tb->nslots) {
- /* Time to get a new segment */
- ASSERT((nactive & SEGSZ_MASK) == 0);
- if (!alloc_seg(tb)) goto abort;
- }
- ASSERT(nactive < tb->nslots);
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (nitems <= GROW_LIMIT(nactive)) {
+ goto abort; /* already done (race) */
+ }
- szm = erts_smp_atomic_read_nob(&tb->szm);
- if (nactive <= szm) {
- from_ix = nactive & (szm >> 1);
- } else {
- ASSERT(nactive == szm+1);
- from_ix = 0;
- szm = (szm<<1) | 1;
- }
+ /* Ensure that the slot nactive exists */
+ if (nactive == tb->nslots) {
+ /* Time to get a new segment */
+ ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0);
+ alloc_seg(tb);
+ }
+ ASSERT(nactive < tb->nslots);
- lck = WLOCK_HASH(tb, from_ix);
- /* Now a final double check (with the from_ix lock held)
- * that we did not get raced by a table fixer.
- */
- if (IS_FIXED(tb)) {
- WUNLOCK_HASH(lck);
- goto abort;
- }
- erts_smp_atomic_inc_nob(&tb->nactive);
- if (from_ix == 0) {
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->szm, szm);
- else
- erts_smp_atomic_set_nob(&tb->szm, szm);
- }
- done_resizing(tb);
+ szm = erts_atomic_read_nob(&tb->szm);
+ if (nactive <= szm) {
+ from_ix = nactive & (szm >> 1);
+ } else {
+ ASSERT(nactive == szm+1);
+ from_ix = 0;
+ szm = (szm<<1) | 1;
+ }
+ to_ix = nactive;
+
+ lck = WLOCK_HASH(tb, from_ix);
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
+ /* Now a final double check (with the from_ix lock held)
+ * that we did not get raced by a table fixer.
+ */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+ erts_atomic_set_nob(&tb->nactive, ++nactive);
+ if (from_ix == 0) {
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_atomic_set_relb(&tb->szm, szm);
+ else
+ erts_atomic_set_nob(&tb->szm, szm);
+ }
+ done_resizing(tb);
+
+ /* Finally, let's split the bucket. We try to do it in a smart way
+ to keep link order and avoid unnecessary updates of next-pointers */
+ pnext = &BUCKET(tb, from_ix);
+ p = *pnext;
+ to_pnext = &BUCKET(tb, to_ix);
+ while (p != NULL) {
+ if (is_pseudo_deleted(p)) { /* rare but possible with fine locking */
+ *pnext = p->next;
+ free_term(tb, p);
+ p = *pnext;
+ }
+ else {
+ int ix = p->hvalue & szm;
+ if (ix != from_ix) {
+ ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
+ *to_pnext = p;
+ /* Swap "from" and "to": */
+ from_ix = ix;
+ to_pnext = pnext;
+ }
+ pnext = &p->next;
+ p = *pnext;
+ }
+ }
+ *to_pnext = NULL;
+ WUNLOCK_HASH(lck);
- /* Finally, let's split the bucket. We try to do it in a smart way
- to keep link order and avoid unnecessary updates of next-pointers */
- pnext = &BUCKET(tb, from_ix);
- p = *pnext;
- to_pnext = &BUCKET(tb, nactive);
- while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
- *pnext = p->next;
- free_term(tb, p);
- p = *pnext;
- }
- else {
- int ix = p->hvalue & szm;
- if (ix != from_ix) {
- ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
- *to_pnext = p;
- /* Swap "from" and "to": */
- from_ix = ix;
- to_pnext = pnext;
- }
- pnext = &p->next;
- p = *pnext;
- }
- }
- *to_pnext = NULL;
+ }while (--loop_limit && nitems > GROW_LIMIT(nactive));
- WUNLOCK_HASH(lck);
return;
abort:
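
/*
 * Aside: a worked example of the split arithmetic in grow() above
 * (illustrative; the szm/from_ix values are assumed). After the size
 * mask doubles, every term that hashed to from_ix under the old mask
 * lands either in from_ix or in the single new bucket
 * from_ix ^ ((szm+1)>>1), which is why one pass over one chain suffices.
 */
#include <assert.h>

static void split_demo(void)
{
    unsigned szm = 0x7;                          /* new mask after doubling */
    unsigned old_mask = szm >> 1;                /* previous mask (0x3) */
    unsigned from_ix = 2;                        /* bucket being split */
    unsigned to_ix = from_ix ^ ((szm + 1) >> 1); /* == 6 here */
    unsigned h;

    for (h = 0; h < 256; h++) {
        if ((h & old_mask) == from_ix)           /* was in bucket from_ix */
            assert((h & szm) == from_ix || (h & szm) == to_ix);
    }
}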
@@ -2712,60 +2911,78 @@ abort:
/* Shrink table by joining top bucket.
** Remove top segment if it gets empty.
*/
-static void shrink(DbTableHash* tb, int nactive)
-{
- if (!begin_resizing(tb))
- return; /* already in progress */
- if (NACTIVE(tb) == nactive) {
- erts_smp_rwmtx_t* lck;
- int src_ix = nactive - 1;
- int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
- int dst_ix = src_ix & low_szm;
-
- ASSERT(dst_ix < src_ix);
- ASSERT(nactive > SEGSZ);
- lck = WLOCK_HASH(tb, dst_ix);
- /* Double check for racing table fixers */
- if (!IS_FIXED(tb)) {
- HashDbTerm** src_bp = &BUCKET(tb, src_ix);
- HashDbTerm** dst_bp = &BUCKET(tb, dst_ix);
- HashDbTerm** bp = src_bp;
-
- /* Q: Why join lists by appending "dst" at the end of "src"?
- A: Must step through "src" anyway to purge pseudo deleted. */
- while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
- HashDbTerm* deleted = *bp;
- *bp = deleted->next;
- free_term(tb, deleted);
- } else {
- bp = &(*bp)->next;
- }
- }
- *bp = *dst_bp;
- *dst_bp = *src_bp;
- *src_bp = NULL;
-
- erts_smp_atomic_set_nob(&tb->nactive, src_ix);
- if (dst_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, low_szm);
- }
- WUNLOCK_HASH(lck);
-
- if (tb->nslots - src_ix >= SEGSZ) {
- free_seg(tb, 0);
- }
- }
- else {
- WUNLOCK_HASH(lck);
- }
+static void shrink(DbTableHash* tb, int nitems)
+{
+ HashDbTerm** src_bp;
+ HashDbTerm** dst_bp;
+ HashDbTerm** bp;
+ erts_rwmtx_t* lck;
+ int src_ix, dst_ix, low_szm;
+ int nactive;
+ int loop_limit = 5;
- }
- /*else already done */
+ do {
+ if (!begin_resizing(tb))
+ return; /* already in progress */
+ nactive = NACTIVE(tb);
+ if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) {
+ goto abort; /* already done (race) */
+ }
+ src_ix = nactive - 1;
+ low_szm = erts_atomic_read_nob(&tb->szm) >> 1;
+ dst_ix = src_ix & low_szm;
+
+ ASSERT(dst_ix < src_ix);
+ ASSERT(nactive > FIRST_SEGSZ);
+ lck = WLOCK_HASH(tb, dst_ix);
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
+ /* Double check for racing table fixers */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+
+ src_bp = &BUCKET(tb, src_ix);
+ dst_bp = &BUCKET(tb, dst_ix);
+ bp = src_bp;
+
+ /*
+ * We join lists by appending "dst" at the end of "src"
+ * as we must step through "src" anyway to purge pseudo-deleted terms.
+ */
+ while(*bp != NULL) {
+ if (is_pseudo_deleted(*bp)) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = *dst_bp;
+ *dst_bp = *src_bp;
+ *src_bp = NULL;
+
+ nactive = src_ix;
+ erts_atomic_set_nob(&tb->nactive, nactive);
+ if (dst_ix == 0) {
+ erts_atomic_set_relb(&tb->szm, low_szm);
+ }
+ WUNLOCK_HASH(lck);
+
+ if (tb->nslots - src_ix >= EXT_SEGSZ) {
+ free_seg(tb, 0);
+ }
+ done_resizing(tb);
+
+ } while (--loop_limit
+ && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive));
+ return;
+
+abort:
done_resizing(tb);
}
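
/*
 * Aside: GROW_LIMIT and SHRINK_LIMIT (defined earlier in this file, not
 * shown in this diff) must leave a gap between them. With the assumed,
 * purely illustrative limits below, one grow step cannot immediately
 * trigger a shrink step, so resizing cannot flip-flop on a stable table.
 */
#include <assert.h>

#define DEMO_GROW_LIMIT(nactive)   (nactive)        /* assumed */
#define DEMO_SHRINK_LIMIT(nactive) ((nactive) / 2)  /* assumed */

static void hysteresis_demo(void)
{
    int nactive = 256;
    int nitems = DEMO_GROW_LIMIT(nactive) + 1;  /* just crossed grow limit */

    nactive *= 2;                               /* after a round of splits */
    assert(!(nitems < DEMO_SHRINK_LIMIT(nactive)));  /* no shrink follows */
}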
-
/* Search a list of tuples for a matching key */
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
@@ -2784,15 +3001,15 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
/* Returns the next live object in a table, NULL if no more */
/* In-bucket: RLOCKED */
/* Out-bucket: RLOCKED unless NULL */
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
- HashDbTerm *list)
+static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+ HashDbTerm *list)
{
int i;
- ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
+ ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
- for (list = list->next; list != NULL; list = list->next) {
- if (list->hvalue != INVALID_HASH)
+ for ( ; list != NULL; list = list->next) {
+ if (!is_pseudo_deleted(list))
return list;
}
@@ -2801,7 +3018,7 @@ static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
list = BUCKET(tb,i);
while (list != NULL) {
- if (list->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(list)) {
*iptr = i;
return list;
}
@@ -2819,7 +3036,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
DbTableHash *tb = &tbl->hash;
HashValue hval;
HashDbTerm **bp, *b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int flags = 0;
ASSERT(tb->common.status & DB_SET);
@@ -2834,7 +3051,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
break;
}
if (has_key(tb, b, key, hval)) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
goto Ldone;
}
break;
@@ -2864,18 +3081,20 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
HashDbTerm *q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = NULL;
*bp = b = q;
flags |= DB_INC_TRY_GROW;
} else {
HashDbTerm *q, *next = b->next;
- ASSERT(b->hvalue == INVALID_HASH);
+ ASSERT(is_pseudo_deleted(b));
q = replace_dbterm(tb, b, obj);
q->next = next;
- q->hvalue = hval;
+ ASSERT(q->hvalue == hval);
+ q->pseudo_deleted = 0;
*bp = b = q;
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ erts_atomic_inc_nob(&tb->common.nitems);
}
HRelease(p, hend, htop);
@@ -2901,24 +3120,24 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp = (HashDbTerm **) handle->bp;
HashDbTerm *b = *bp;
- erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck;
+ erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck;
HashDbTerm* free_me = NULL;
- ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
+ ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE));
if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
if (IS_FIXED(tb) && add_fixed_deletion(tb, hash_to_ix(tb, b->hvalue),
0)) {
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
*bp = b->next;
free_me = b;
}
WUNLOCK_HASH(lck);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
try_shrink(tb);
} else {
if (handle->flags & DB_MUST_RESIZE) {
@@ -2927,12 +3146,12 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
}
if (handle->flags & DB_INC_TRY_GROW) {
int nactive;
- int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ int nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) {
- grow(tb, nactive);
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
+ grow(tb, nitems);
}
} else {
WUNLOCK_HASH(lck);
@@ -2948,16 +3167,19 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
return;
}
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds)
{
if (IS_FIXED(tbl)) {
- db_mark_all_deleted_hash(tbl);
+ reds = db_mark_all_deleted_hash(tbl, reds);
} else {
- db_free_table_hash(tbl);
+ reds = db_free_table_continue_hash(tbl, reds);
+ if (reds < 0)
+ return reds;
+
db_create_hash(p, tbl);
- erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0);
+ erts_atomic_set_nob(&tbl->hash.common.nitems, 0);
}
- return 0;
+ return reds;
}
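
/*
 * Aside: sketch of the reduction-budget convention used by
 * db_delete_all_objects_hash() and the *_continue functions above
 * (illustrative; not ERTS code). The budget is decremented as work is
 * done; a negative return means "out of budget, trap and call again",
 * while a non-negative return is the leftover budget of a finished job.
 */
static long delete_all_demo(long reds, int *work_left)
{
    while (*work_left > 0) {
        --*work_left;            /* one deleted object ~ one reduction */
        if (--reds < 0)
            return reds;         /* yield: caller re-invokes us later */
    }
    return reds;                 /* done, with budget to spare */
}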
void db_foreach_offheap_hash(DbTable *tbl,
@@ -2985,7 +3207,7 @@ void db_foreach_offheap_hash(DbTable *tbl,
void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
{
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int sum = 0;
int sq_sum = 0;
int kept_items = 0;
@@ -3000,7 +3222,7 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
- if (b->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(b))
++kept_items;
}
sum += len;
@@ -3016,24 +3238,30 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
stats->kept_items = kept_items;
}
-#ifdef HARDDEBUG
-void db_check_table_hash(DbTable *tbl)
+/* For testing only */
+Eterm erts_ets_hash_sizeof_ext_segtab(void)
{
- DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
- int j;
-
- for (j = 0; j < tb->nactive; j++) {
- if ((list = BUCKET(tb,j)) != 0) {
- while (list != 0) {
- if (!is_tuple(make_tuple(list->dbterm.tpl))) {
- erts_exit(ERTS_ERROR_EXIT, "Bad term in slot %d of ets table", j);
- }
- list = list->next;
- }
- }
- }
+ return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
-#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
+ int i;
+
+ if(tb->locks == NULL) {
+ return;
+ }
+
+ for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck.lcnt;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(ref);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e654363cd5..eae5537ba4 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,13 +24,26 @@
#include "erl_db_util.h" /* DbTerm & DbTableCommon */
typedef struct fixed_deletion {
- int slot;
+ UWord slot : sizeof(UWord)*8 - 2;
+ UWord all : 1;
+ UWord trap : 1;
struct fixed_deletion *next;
} FixedDeletion;
+
+typedef Uint32 HashVal;
+
typedef struct hash_db_term {
struct hash_db_term* next; /* next bucket */
- HashValue hvalue; /* stored hash value */
+#if SIZEOF_VOID_P == 4
+ Uint32 hvalue : 31; /* stored hash value */
+ Uint32 pseudo_deleted : 1;
+# define MAX_HASH_MASK (((Uint32)1 << 31)-1)
+#elif SIZEOF_VOID_P == 8
+ Uint32 hvalue;
+ Uint32 pseudo_deleted;
+# define MAX_HASH_MASK ((Uint32)(Sint32)-1)
+#endif
DbTerm dbterm; /* The actual term */
} HashDbTerm;
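
/*
 * Aside: sketch of how a raw hash would be narrowed to fit the 31-bit
 * hvalue field on 32-bit builds, using the MAX_HASH_MASK defined above
 * (illustrative). Losing the top bit only perturbs bucket distribution;
 * correctness is preserved because lookups still compare full keys.
 */
static Uint32 demo_narrow_hash(Uint32 raw)
{
    return raw & MAX_HASH_MASK;
}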
@@ -42,31 +55,29 @@ typedef struct hash_db_term {
typedef struct db_table_hash_fine_locks {
union {
- erts_smp_rwmtx_t lck;
- byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_rwmtx_t))];
+ erts_rwmtx_t lck;
+ byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
}lck_vec[DB_HASH_LOCK_CNT];
} DbTableHashFineLocks;
typedef struct db_table_hash {
DbTableCommon common;
- erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
- erts_smp_atomic_t szm; /* current size mask. */
-
+ /* SMP: szm and nactive are write-protected by is_resizing or table write lock */
+ erts_atomic_t szm; /* current size mask. */
+ erts_atomic_t nactive; /* Number of "active" slots */
+
+ erts_atomic_t segtab; /* The segment table (struct segment**) */
+ struct segment* first_segtab[1];
+
/* SMP: nslots and nsegs are protected by is_resizing or table write lock */
int nslots; /* Total number of slots */
int nsegs; /* Size of segment table */
/* List of slots where elements have been deleted while table was fixed */
- erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
- erts_smp_atomic_t nactive; /* Number of "active" slots */
- erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
-#ifdef ERTS_SMP
+ erts_atomic_t fixdel; /* (FixedDeletion*) */
+ erts_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
-#endif
-#ifdef VALGRIND
- struct ext_segment* top_ptr_to_segment_with_active_segtab;
-#endif
} DbTableHash;
@@ -75,7 +86,7 @@ typedef struct db_table_hash {
** table types. The process is always an [in out] parameter.
*/
void db_initialize_hash(void);
-void db_unfix_table_hash(DbTableHash *tb /* [in out] */);
+SWord db_unfix_table_hash(DbTableHash *tb);
Uint db_kept_items_hash(DbTableHash *tb);
/* Interface for meta pid table */
@@ -88,17 +99,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret);
-
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value);
-
-/* not yet in method table */
-int db_mark_all_deleted_hash(DbTable *tbl);
-
typedef struct {
float avg_chain_len;
float std_dev_chain_len;
@@ -109,5 +109,10 @@ typedef struct {
}DbHashStats;
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
+Eterm erts_ets_hash_sizeof_ext_segtab(void);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
+#endif
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 02d211a4bb..45e4be2426 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,7 +50,7 @@
#include "erl_db_tree.h"
#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
/*
** A stack of this size is enough for an AVL tree with more than
@@ -76,8 +76,14 @@
((Dtt->pos) ? \
(Dtt)->array[(Dtt)->pos - 1] : NULL)
-#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+#define TOPN_NODE(Dtt, Pos) \
+ (((Pos) < Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - ((Pos) + 1)] : NULL)
+
+#define REPLACE_TOP_NODE(Dtt, Node) \
+ if ((Dtt)->pos) (Dtt)->array[(Dtt)->pos - 1] = (Node)
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
/* Obtain table static stack if available. NULL if not.
@@ -85,7 +91,7 @@
*/
static DbTreeStack* get_static_stack(DbTableTree* tb)
{
- if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+ if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
return NULL;
@@ -97,7 +103,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
static DbTreeStack* get_any_stack(DbTableTree* tb)
{
DbTreeStack* stack;
- if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+ if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -111,8 +117,8 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
static void release_stack(DbTableTree* tb, DbTreeStack* stack)
{
if (stack == &tb->static_stack) {
- ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1);
- erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
+ ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1);
+ erts_atomic_set_relb(&tb->is_stack_busy, 0);
}
else {
erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -164,11 +170,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
#define DIR_END 2
/*
- * Special binary flag
- */
-#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
-
-/*
* Number of records to delete before trapping.
*/
#define DELETE_RECORD_LIMIT 12000
@@ -180,7 +181,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to);
static void check_slot_pos(DbTableTree *tb);
static void check_saved_stack(DbTableTree *tb);
-static int check_table_tree(DbTableTree* tb, TreeDbTerm *t);
#define TREE_DEBUG
#endif
@@ -213,9 +213,6 @@ static void do_dump_tree2(DbTableTree*, int to, void *to_arg, int show,
* functions.
*/
struct mp_info {
- int all_objects; /* True if complete objects are always
- * returned from the match_spec (can use
- * copy_shallow on the return value) */
int something_can_match; /* The match_spec is not "impossible" */
int some_limitation; /* There is some limitation on the search
* area, i. e. least and/or most is set.*/
@@ -226,9 +223,9 @@ struct mp_info {
Eterm most; /* The highest matching key (possibly
* partially bound expression) */
- TreeDbTerm *save_term; /* If the key is completely bound, this
- * will be the Tree node we're searching
- * for, otherwise it will be useless */
+ TreeDbTerm **save_term; /* If the key is completely bound, this
+ * will be the Tree node we're searching
+ * for, otherwise it will be useless */
Binary *mp; /* The compiled match program */
};
@@ -243,7 +240,6 @@ struct select_context {
Eterm *lastobj;
Sint32 max;
int keypos;
- int all_objects;
Sint got;
Sint chunk_size;
};
@@ -258,7 +254,6 @@ struct select_count_context {
Eterm *lastobj;
Sint32 max;
int keypos;
- int all_objects;
Sint got;
};
@@ -278,12 +273,29 @@ struct select_delete_context {
};
/*
+ * Used by doit_select_replace
+ */
+struct select_replace_context {
+ Process *p;
+ DbTableTree *tb;
+ Binary *mp;
+ Eterm end_condition;
+ Eterm *lastobj;
+ Sint32 max;
+ int keypos;
+ Sint replaced;
+};
+
+/* Used by select_replace on analyze_pattern */
+typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Eterm body);
+
+/*
** Forward declarations
*/
static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
Eterm object);
-static int do_free_tree_cont(DbTableTree *tb, int num_left);
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
static void free_term(DbTableTree *tb, TreeDbTerm* p);
static int balance_left(TreeDbTerm **this);
static int balance_right(TreeDbTerm **this);
@@ -291,6 +303,7 @@ static int delsub(TreeDbTerm **this);
static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack*, TreeDbTerm *this);
static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
@@ -312,14 +325,23 @@ static void traverse_forward(DbTableTree *tb,
TreeDbTerm *,
void *,
int),
- void *context);
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+ void *context);
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack*,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *tb,
+ TreeDbTerm **, // out
+ void *,
+ int),
+ void *context);
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound_key);
static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi);
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi);
static int doit_select(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
@@ -336,6 +358,10 @@ static int doit_select_delete(DbTableTree *tb,
TreeDbTerm *this,
void *ptr,
int forward);
+static int doit_select_replace(DbTableTree *tb,
+ TreeDbTerm **this_ptr,
+ void *ptr,
+ int forward);
static int partly_bound_can_match_lesser(Eterm partly_bound_1,
Eterm partly_bound_2);
@@ -369,33 +395,37 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret);
static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_tree(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reversed, Eterm *ret);
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reversed, Eterm *ret);
static int db_select_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
static int db_select_count_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret);
+static int db_select_replace_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
-static void db_print_tree(int to, void *to_arg,
+static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
-static int db_free_table_tree(DbTable *tbl);
+static int db_free_empty_table_tree(DbTable *tbl);
-static int db_free_table_continue_tree(DbTable *tbl);
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord);
static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static int db_delete_all_objects_tree(Process* p, DbTable* tbl);
+static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds);
#ifdef HARDDEBUG
static void db_check_table_tree(DbTable *tbl);
@@ -436,17 +466,14 @@ DbTableMethod db_tree =
db_select_delete_continue_tree,
db_select_count_tree,
db_select_count_continue_tree,
+ db_select_replace_tree,
+ db_select_replace_continue_tree,
db_take_tree,
db_delete_all_objects_tree,
- db_free_table_tree,
+ db_free_empty_table_tree,
db_free_table_continue_tree,
db_print_tree,
db_foreach_offheap_tree,
-#ifdef HARDDEBUG
- db_check_table_tree,
-#else
- NULL,
-#endif
db_lookup_dbterm_tree,
db_finalize_dbterm_tree
@@ -476,7 +503,7 @@ int db_create_tree(Process *p, DbTable *tbl)
sizeof(TreeDbTerm *) * STACK_NEED);
tb->static_stack.pos = 0;
tb->static_stack.slot = 0;
- erts_smp_atomic_init_nob(&tb->is_stack_busy, 0);
+ erts_atomic_init_nob(&tb->is_stack_busy, 0);
tb->deletion = 0;
return DB_ERROR_NONE;
}
@@ -605,8 +632,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
for (;;)
if (!*this) { /* Found our place */
state = 1;
- if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
+ erts_atomic_dec_nob(&tb->common.nitems);
return DB_ERROR_SYSRES;
}
*this = new_dbterm(tb, obj);
@@ -935,17 +962,15 @@ static int db_select_continue_tree(Process *p,
if (arityval(*tptr) != 8)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- if (!is_small(tptr[4]) || !is_binary(tptr[5]) ||
+ if (!is_small(tptr[4]) ||
!(is_list(tptr[6]) || tptr[6] == NIL) || !is_small(tptr[7]) ||
!is_small(tptr[8]))
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[5])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[5]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[5]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
chunk_size = signed_val(tptr[4]);
@@ -956,7 +981,6 @@ static int db_select_continue_tree(Process *p,
sc.lastobj = NULL;
sc.max = 1000;
sc.keypos = tb->common.keypos;
- sc.all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
@@ -1060,7 +1084,7 @@ static int db_select_continue_tree(Process *p,
}
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
@@ -1097,7 +1121,7 @@ static int db_select_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1107,11 +1131,10 @@ static int db_select_tree(Process *p, DbTable *tbl,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,mpi.save_term,&sc,0 /* direction doesn't matter */);
+ doit_select(tb,*(mpi.save_term),&sc,0 /* direction doesn't matter */);
RET_TO_BIF(sc.accum,DB_ERROR_NONE);
}
@@ -1145,15 +1168,13 @@ static int db_select_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb=db_make_mp_binary(p,mpi.mp,&hp);
+ mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(0), /* Chunk size of zero means not chunked to the
@@ -1208,10 +1229,8 @@ static int db_select_count_continue_tree(Process *p,
lastkey = tptr[2];
end_condition = tptr[3];
- if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
- RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
- if (!IsMatchProgBinary(mp))
+ mp = erts_db_get_match_prog_binary(tptr[4]);
+ if (!mp)
RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
sc.p = p;
@@ -1267,7 +1286,7 @@ static int db_select_count_continue_tree(Process *p,
}
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1302,7 +1321,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.got = 0;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1312,11 +1331,10 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select_count(tb,mpi.save_term,&sc,0 /* dummy */);
+ doit_select_count(tb,*(mpi.save_term),&sc,0 /* dummy */);
RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
}
@@ -1338,22 +1356,20 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
if (IS_USMALL(0, sc.got)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = make_small(sc.got);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
egot = uint_to_big(sc.got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1367,7 +1383,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse,
Eterm *ret)
@@ -1405,7 +1421,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
sc.got = 0;
sc.chunk_size = chunk_size;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(NIL,errcode);
}
@@ -1415,11 +1431,10 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select(tb,mpi.save_term,&sc, 0 /* direction doesn't matter */);
+ doit_select(tb,*(mpi.save_term),&sc, 0 /* direction doesn't matter */);
if (sc.accum != NIL) {
hp=HAlloc(p, 3);
RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE);
@@ -1470,15 +1485,13 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program,
needn't be copied */
@@ -1495,15 +1508,13 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, sc.lastobj);
sz = size_object(key);
- hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(chunk_size),
@@ -1558,7 +1569,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.erase_lastterm = 0; /* Before first RET_TO_BIF */
sc.lastterm = NULL;
- mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
sc.p = p;
sc.tb = tb;
if (is_big(tptr[5])) {
@@ -1571,7 +1582,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.max = 1000;
sc.keypos = tb->common.keypos;
- ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy));
+ ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
@@ -1609,7 +1620,7 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1647,7 +1658,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
sc.keypos = tb->common.keypos;
sc.tb = tb;
- if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ if ((errcode = analyze_pattern(tb, pattern, NULL, &mpi)) != DB_ERROR_NONE) {
RET_TO_BIF(0,errcode);
}
@@ -1660,7 +1671,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
- doit_select_delete(tb,mpi.save_term,&sc, 0 /* direction doesn't
+ doit_select_delete(tb,*(mpi.save_term),&sc, 0 /* direction doesn't
matter */);
RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
}
@@ -1682,20 +1693,20 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
sz = size_object(key);
if (IS_USMALL(0, sc.accum)) {
- hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = make_small(sc.accum);
}
else {
- hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
eaccsum = uint_to_big(sc.accum, hp);
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1712,6 +1723,197 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
}
+static int db_select_replace_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Eterm ereplaced;
+ Sint prev_replaced;
+
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
+
+ /* Decode continuation. We know it's a well-formed tuple, as the
+ continuation is only built by ourselves */
+
+ /* continuation:
+ {Table, Lastkey, EndCondition, MatchProgBin, HowManyReplaced}*/
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 5)
+ erts_exit(ERTS_ERROR_EXIT,"Internal error in ets:select_replace/1");
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+ mp = erts_db_get_match_prog_binary_unchecked(tptr[4]);
+
+ sc.p = p;
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.lastobj = NULL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+ if (is_big(tptr[5])) {
+ sc.replaced = big_to_uint32(tptr[5]);
+ } else {
+ sc.replaced = unsigned_val(tptr[5]);
+ }
+ prev_replaced = sc.replaced;
+
+ stack = get_any_stack(tb);
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+
+ /* The more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + (sc.replaced - prev_replaced)));
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p), DB_ERROR_NONE);
+ }
+ key = GETKEY(tb, sc.lastobj);
+ if (end_condition != NIL &&
+ (cmp_partly_bound(end_condition,key) > 0)) {
+ /* done anyway */
+ RET_TO_BIF(make_small(sc.replaced),DB_ERROR_NONE);
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE5
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ ereplaced);
+ RET_TO_BIF(bif_trap1(&ets_select_replace_continue_exp, p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
+static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_replace_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = THE_NON_VALUE;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm ereplaced;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.tb = tb;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.replaced = 0;
+
+ if ((errcode = analyze_pattern(tb, pattern, db_match_keeps_key, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0),DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+
+ if (!mpi.got_partial && mpi.some_limitation &&
+ CMP_EQ(mpi.least,mpi.most)) {
+ doit_select_replace(tb,mpi.save_term,&sc,0 /* dummy */);
+ reset_static_stack(tb); /* may refer replaced term */
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ stack = get_any_stack(tb);
+
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_update_backwards(tb, stack, lastkey, &doit_select_replace, &sc);
+ release_stack(tb,stack);
+ /* The more objects we've replaced, the more reductions we've consumed */
+ BUMP_REDS(p, MIN(2000, (1000 - sc.max) + sc.replaced));
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.replaced,p),DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.replaced)) {
+ hp = HAlloc(p, sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = make_small(sc.replaced);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + ERTS_MAGIC_REF_THING_SIZE + 6);
+ ereplaced = uint_to_big(sc.replaced, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
+
+ continuation = TUPLE5
+ (hp,
+ tid,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ mpb,
+ ereplaced);
+
+ /* Don't free mpi.mp, so don't use macro */
+ *ret = bif_trap1(&ets_select_replace_continue_exp, p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1740,7 +1942,7 @@ static int db_take_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
/* Display tree contents (for dump) */
-static void db_print_tree(int to, void *to_arg,
+static void db_print_tree(fmtfn_t to, void *to_arg,
int show,
DbTable *tbl)
{
@@ -1759,41 +1961,43 @@ static void db_print_tree(int to, void *to_arg,
}
/* release all memory occupied by a single table */
-static int db_free_table_tree(DbTable *tbl)
+static int db_free_empty_table_tree(DbTable *tbl)
{
- while (!db_free_table_continue_tree(tbl))
+ ASSERT(tbl->tree.root == NULL);
+ while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0)
;
return 1;
}
-static int db_free_table_continue_tree(DbTable *tbl)
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
{
DbTableTree *tb = &tbl->tree;
- int result;
if (!tb->deletion) {
tb->static_stack.pos = 0;
tb->deletion = 1;
PUSH_NODE(&tb->static_stack, tb->root);
}
- result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT);
- if (result) { /* Completely done. */
+ reds = do_free_tree_continue(tb, reds);
+ if (reds >= 0) { /* Completely done. */
erts_db_free(ERTS_ALC_T_DB_STK,
(DbTable *) tb,
(void *) tb->static_stack.array,
sizeof(TreeDbTerm *) * STACK_NEED);
- ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
+ ASSERT(erts_atomic_read_nob(&tb->common.memory_size)
== sizeof(DbTable));
}
- return result;
+ return reds;
}
-static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
+static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds)
{
- db_free_table_tree(tbl);
+ reds = db_free_table_continue_tree(tbl, reds);
+ if (reds < 0)
+ return reds;
db_create_tree(p, tbl);
- erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0);
- return 0;
+ erts_atomic_set_nob(&tbl->tree.common.nitems, 0);
+ return reds;
}
static void do_db_tree_foreach_offheap(TreeDbTerm *,
@@ -1872,7 +2076,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
break;
}
}
@@ -1939,7 +2143,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
break;
}
}
@@ -1958,8 +2162,9 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
** For the select functions, analyzes the pattern and determines which
** part of the tree should be searched. Also compiles the match program
*/
-static int analyze_pattern(DbTableTree *tb, Eterm pattern,
- struct mp_info *mpi)
+static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+ extra_match_validator_t extra_validator, /* Optional callback */
+ struct mp_info *mpi)
{
Eterm lst, tpl, ttpl;
Eterm *matches,*guards, *bodies;
@@ -1978,7 +2183,6 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
mpi->got_partial = 0;
mpi->something_can_match = 0;
mpi->mp = NULL;
- mpi->all_objects = 1;
mpi->save_term = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
@@ -1997,7 +2201,10 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
i = 0;
for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
- Eterm body;
+ Eterm match;
+ Eterm guard;
+ Eterm body;
+
ttpl = CAR(list_val(lst));
if (!is_tuple(ttpl)) {
if (buff != sbuff) {
@@ -2012,17 +2219,24 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
}
return DB_ERROR_BADPARAM;
}
- matches[i] = tpl = ptpl[1];
- guards[i] = ptpl[2];
+ matches[i] = match = tpl = ptpl[1];
+ guards[i] = guard = ptpl[2];
bodies[i] = body = ptpl[3];
+
+ if(extra_validator != NULL && !extra_validator(tb->common.keypos, match, guard, body)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
- mpi->all_objects = 0;
}
++i;
partly_bound = NIL;
- res = key_given(tb, tpl, &mpi->save_term, &partly_bound);
+ res = key_given(tb, tpl, &(mpi->save_term), &partly_bound);
if ( res >= 0 ) { /* Can match something */
key = 0;
mpi->something_can_match = 1;
@@ -2068,7 +2282,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static int do_free_tree_cont(DbTableTree *tb, int num_left)
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
{
TreeDbTerm *root;
TreeDbTerm *p;
@@ -2087,15 +2301,14 @@ static int do_free_tree_cont(DbTableTree *tb, int num_left)
root = p;
} else {
free_term(tb, root);
- if (--num_left > 0) {
- break;
- } else {
- return 0; /* Done enough for now */
- }
+ if (--reds < 0) {
+ return reds; /* Done enough for now */
+ }
+ break;
}
}
}
- return 1;
+ return reds;
}
/*
@@ -2526,6 +2739,58 @@ static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
return this;
}
+/*
+ * Find node and return the address of the node pointer (NULL if not found)
+ * Tries to reuse the existing stack for performance.
+ */
+
+static TreeDbTerm **find_ptr(DbTableTree *tb, DbTreeStack *stack, TreeDbTerm *this) {
+ Eterm key = GETKEY(tb, this->dbterm.tpl);
+ TreeDbTerm *tmp;
+ TreeDbTerm *parent;
+ Sint c;
+
+ if(( tmp = TOP_NODE(stack)) != NULL) {
+ if (!cmp_key_eq(tb,key,tmp)) {
+ /* Start from the beginning */
+ stack->pos = stack->slot = 0;
+ }
+ }
+ if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
+ if (( tmp = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, tmp);
+ if (( c = cmp_key(tb,key,tmp) ) < 0) {
+ if (tmp->left == NULL) /* We are at the next node
+ and the element does
+ not exist */
+ break;
+ else
+ tmp = tmp->left;
+ } else if (c > 0) {
+ if (tmp->right == NULL) /* Done */
+ return NULL;
+ else
+ tmp = tmp->right;
+ } else
+ break;
+ }
+ }
+
+ if (TOP_NODE(stack) != this)
+ return NULL;
+
+ parent = TOPN_NODE(stack, 1);
+ if (parent == NULL)
+ return ((this != tb->root) ? NULL : &(tb->root));
+ if (parent->left == this)
+ return &(parent->left);
+ if (parent->right == this)
+ return &(parent->right);
+ return NULL;
+}
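
/*
 * Aside: the "address of the owning pointer" idea behind find_ptr()
 * above, reduced to a plain binary search tree without the stack reuse
 * (illustrative, not ERTS code). Returning a node** lets the caller
 * splice in a replacement node without touching the rest of the tree.
 */
struct demo_node { int key; struct demo_node *left, *right; };

static struct demo_node **find_slot(struct demo_node **root, int key)
{
    struct demo_node **this = root;
    while (*this != NULL) {
        if (key < (*this)->key)
            this = &(*this)->left;
        else if (key > (*this)->key)
            this = &(*this)->right;
        else
            return this;   /* address of the pointer owning the node */
    }
    return NULL;           /* key not present */
}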
+
static int
db_lookup_dbterm_tree(Process *p, DbTable *tbl, Eterm key, Eterm obj,
DbUpdateHandle* handle)
@@ -2667,13 +2932,60 @@ static void traverse_forward(DbTableTree *tb,
}
/*
+ * Traverse the tree with an update callback function, used by db_select_replace
+ */
+static void traverse_update_backwards(DbTableTree *tb,
+ DbTreeStack* stack,
+ Eterm lastkey,
+ int (*doit)(DbTableTree*,
+ TreeDbTerm**,
+ void*,
+ int),
+ void* context)
+{
+ int res;
+ TreeDbTerm *this, *next, **this_ptr;
+
+ if (lastkey == THE_NON_VALUE) {
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL) {
+ return;
+ }
+ while (this != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ this = TOP_NODE(stack);
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ } else {
+ next = find_prev(tb, stack, lastkey);
+ }
+
+ while ((this = next) != NULL) {
+ this_ptr = find_ptr(tb, stack, this);
+ ASSERT(this_ptr != NULL);
+ res = (*doit)(tb, this_ptr, context, 0);
+ REPLACE_TOP_NODE(stack, *this_ptr);
+ next = find_prev(tb, stack, GETKEY(tb, (*this_ptr)->dbterm.tpl));
+ if (!res)
+ return;
+ }
+}
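
/*
 * Aside: illustrative skeleton of a doit-callback for
 * traverse_update_backwards() above (not part of this patch). Receiving
 * TreeDbTerm** lets the callback splice in a replacement node of equal
 * key, as doit_select_replace() does; returning 0 stops the traversal.
 */
static int doit_demo(DbTableTree *tb, TreeDbTerm **this,
                     void *context, int forward)
{
    (void)tb; (void)context; (void)forward;
    /* *this may be replaced here with a node carrying the same key */
    return 1;   /* continue towards smaller keys */
}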
+
+/*
* Returns 0 if the key is not given, 1 if given, and -1 on no possible match.
* If the key is given, *ret is set to point to the object concerned.
*/
-static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm ***ret,
Eterm *partly_bound)
{
- TreeDbTerm *this;
+ TreeDbTerm **this;
Eterm key;
ASSERT(ret != NULL);
@@ -2683,7 +2995,7 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
if (is_non_value(key))
return -1; /* can't possibly match anything */
if (!db_has_variable(key)) { /* Bound key */
- if (( this = find_node(tb, key) ) == NULL) {
+ if (( this = find_node2(tb, key) ) == NULL) {
return -1;
}
*ret = this;
@@ -2994,8 +3306,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
- ret = db_match_dbterm(&tb->common,sc->p,sc->mp,sc->all_objects,
- &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
sc->accum = CONS(hp, ret, sc->accum);
}
@@ -3026,8 +3337,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
- &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
++(sc->got);
}
@@ -3056,8 +3366,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, sc->all_objects,
- &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
++(sc->got);
sc->accum = CONS(hp, ret, sc->accum);
@@ -3092,8 +3401,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
cmp_partly_bound(sc->end_condition,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)
return 0;
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
- &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
linkout_tree(sc->tb, key);
@@ -3106,6 +3414,45 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 1;
}
+static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
+ int forward)
+{
+ struct select_replace_context *sc = (struct select_replace_context *) ptr;
+ Eterm ret;
+
+ sc->lastobj = (*this)->dbterm.tpl;
+
+ /* Always backwards traversing */
+ if (sc->end_condition != NIL &&
+ (cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
+ return 0;
+ }
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
+
+ if (is_value(ret)) {
+ TreeDbTerm* new;
+ TreeDbTerm* old = *this;
+#ifdef DEBUG
+ Eterm key = db_getkey(tb->common.keypos, ret);
+ ASSERT(is_value(key));
+ ASSERT(cmp_key(tb, key, old) == 0);
+#endif
+ new = new_dbterm(tb, ret);
+ new->left = old->left;
+ new->right = old->right;
+ new->balance = old->balance;
+ sc->lastobj = new->dbterm.tpl;
+ *this = new;
+ free_term(tb, old);
+ ++(sc->replaced);
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
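
/*
 * Aside: at the Erlang level, the splice above corresponds to
 * ets:select_replace/2 with a key-preserving match spec, e.g.
 * (illustrative):
 *
 *     ets:select_replace(T, [{{'$1','$2'}, [], [{{'$1',updated}}]}]),
 *
 * which rewrites every {Key, _} to {Key, updated}. Specs that could
 * change the key are rejected up front by db_match_keeps_key().
 */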
+
#ifdef TREE_DEBUG
static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
TreeDbTerm *t, int offset)
@@ -3134,6 +3481,9 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
#ifdef HARDDEBUG
+/*
+ * Not called, but kept as it might come into use
+ */
void db_check_table_tree(DbTable *tbl)
{
DbTableTree *tb = &tbl->tree;
diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h
index 72749ead1e..54da2a6bc1 100644
--- a/erts/emulator/beam/erl_db_tree.h
+++ b/erts/emulator/beam/erl_db_tree.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ typedef struct db_table_tree {
/* Tree-specific fields */
TreeDbTerm *root; /* The tree root */
Uint deletion; /* Being deleted */
- erts_smp_atomic_t is_stack_busy;
+ erts_atomic_t is_stack_busy;
DbTreeStack static_stack;
} DbTableTree;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 6732b708a8..f1d47326b4 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,6 +38,7 @@
#include "erl_binary.h"
#include "erl_map.h"
#include "erl_thr_progress.h"
+#include "erl_proc_sig_queue.h"
#include "erl_db_util.h"
@@ -74,12 +75,35 @@ DBIF_TABLE_GUARD | DBIF_TABLE_BODY | DBIF_TRACE_GUARD | DBIF_TRACE_BODY
typedef struct DMC_STACK_TYPE(Type) { \
int pos; \
int siz; \
- Type def[DMC_DEFAULT_SIZE]; \
+ int bytes; \
Type *data; \
+ Type def[DMC_DEFAULT_SIZE]; \
} DMC_STACK_TYPE(Type)
+
+
+typedef int Dummy;
+DMC_DECLARE_STACK_TYPE(Dummy);
+
+static void dmc_stack_grow(DMC_Dummy_stack* s)
+{
+ int was_bytes = s->bytes;
+ s->siz *= 2;
+ s->bytes *= 2;
+ if (s->data == s->def) {
+ s->data = erts_alloc(ERTS_ALC_T_DB_MC_STK, s->bytes);
+ sys_memcpy(s->data, s->def, was_bytes);
+ }
+ else {
+ s->data = erts_realloc(ERTS_ALC_T_DB_MC_STK, s->data, s->bytes);
+ }
+}
-#define DMC_INIT_STACK(Name) \
- (Name).pos = 0; (Name).siz = DMC_DEFAULT_SIZE; (Name).data = (Name).def
+#define DMC_INIT_STACK(Name) do { \
+ (Name).pos = 0; \
+ (Name).siz = DMC_DEFAULT_SIZE; \
+ (Name).bytes = sizeof((Name).def); \
+ (Name).data = (Name).def; \
+} while (0)
#define DMC_STACK_DATA(Name) (Name).data
@@ -87,21 +111,19 @@ typedef struct DMC_STACK_TYPE(Type) { \
#define DMC_PUSH(On, What) \
do { \
- if ((On).pos >= (On).siz) { \
- (On).siz *= 2; \
- (On).data \
- = (((On).def == (On).data) \
- ? memcpy(erts_alloc(ERTS_ALC_T_DB_MC_STK, \
- (On).siz*sizeof(*((On).data))), \
- (On).def, \
- DMC_DEFAULT_SIZE*sizeof(*((On).data))) \
- : erts_realloc(ERTS_ALC_T_DB_MC_STK, \
- (void *) (On).data, \
- (On).siz*sizeof(*((On).data)))); \
- } \
+ if ((On).pos >= (On).siz) \
+ dmc_stack_grow((DMC_Dummy_stack*)&(On)); \
(On).data[(On).pos++] = What; \
} while (0)
+#define DMC_PUSH2(On, A, B) \
+do { \
+ if ((On).pos+1 >= (On).siz) \
+ dmc_stack_grow((DMC_Dummy_stack*)&(On)); \
+ (On).data[(On).pos++] = A; \
+ (On).data[(On).pos++] = B; \
+} while (0)
+
#define DMC_POP(From) (From).data[--(From).pos]
#define DMC_TOP(From) (From).data[(From).pos - 1]
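
/*
 * Aside: the pattern behind DMC_INIT_STACK/dmc_stack_grow above, in
 * miniature (illustrative; allocation failure handling elided, as the
 * ERTS allocators abort on failure). The stack starts on an inline
 * buffer and moves to the heap only on first overflow, so the common
 * small case never allocates.
 */
#include <stdlib.h>
#include <string.h>

struct small_stack {
    int pos, siz;
    int *data;
    int def[8];                          /* inline storage */
};

static void small_init(struct small_stack *s)
{
    s->pos = 0;
    s->siz = 8;
    s->data = s->def;
}

static void small_grow(struct small_stack *s)
{
    int was_bytes = s->siz * (int)sizeof(int);
    s->siz *= 2;
    if (s->data == s->def) {             /* first overflow: go to heap */
        s->data = malloc(2 * was_bytes);
        memcpy(s->data, s->def, was_bytes);
    } else {                             /* already on heap: just grow */
        s->data = realloc(s->data, 2 * was_bytes);
    }
}

static void small_push(struct small_stack *s, int v)
{
    if (s->pos >= s->siz)
        small_grow(s);
    s->data[s->pos++] = v;
}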
@@ -155,6 +177,7 @@ set_tracee_flags(Process *tracee_p, ErtsTracer tracer,
: am_false);
erts_tracer_replace(&tracee_p->common, tracer);
ERTS_TRACE_FLAGS(tracee_p) = flags;
+
return ret;
}
/*
@@ -170,7 +193,7 @@ static Eterm
set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
Uint d_flags, Uint e_flags) {
- ERTS_SMP_LC_ASSERT(
+ ERTS_LC_ASSERT(
ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
|| erts_thr_progress_is_blocking());
@@ -361,11 +384,7 @@ typedef struct {
} ErtsMatchPseudoProcess;
-#ifdef ERTS_SMP
-static erts_smp_tsd_key_t match_pseudo_process_key;
-#else
-static ErtsMatchPseudoProcess *match_pseudo_process;
-#endif
+static erts_tsd_key_t match_pseudo_process_key;
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
@@ -414,32 +433,27 @@ static ERTS_INLINE ErtsMatchPseudoProcess *
get_match_pseudo_process(Process *c_p, Uint heap_size)
{
ErtsMatchPseudoProcess *mpsp;
-#ifdef ERTS_SMP
ErtsSchedulerData *esdp;
esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data();
mpsp = esdp ? esdp->match_pseudo_process :
- (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key);
+ (ErtsMatchPseudoProcess*) erts_tsd_get(match_pseudo_process_key);
if (mpsp) {
- ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
+ ASSERT(mpsp == erts_tsd_get(match_pseudo_process_key));
ASSERT(mpsp->process.scheduler_data == esdp);
cleanup_match_pseudo_process(mpsp, 0);
}
else {
- ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL);
+ ASSERT(erts_tsd_get(match_pseudo_process_key) == NULL);
mpsp = create_match_pseudo_process();
if (esdp) {
esdp->match_pseudo_process = (void *) mpsp;
}
mpsp->process.scheduler_data = esdp;
- erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
+ erts_tsd_set(match_pseudo_process_key, (void *) mpsp);
}
-#else
- mpsp = match_pseudo_process;
- cleanup_match_pseudo_process(mpsp, 0);
-#endif
if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) {
mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size);
}
@@ -449,31 +463,25 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
return mpsp;
}
-#ifdef ERTS_SMP
static void
destroy_match_pseudo_process(void)
{
ErtsMatchPseudoProcess *mpsp;
- mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key);
+ mpsp = (ErtsMatchPseudoProcess *)erts_tsd_get(match_pseudo_process_key);
if (mpsp) {
cleanup_match_pseudo_process(mpsp, 0);
erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp);
- erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL);
+ erts_tsd_set(match_pseudo_process_key, (void *) NULL);
}
}
-#endif
static
void
match_pseudo_process_init(void)
{
-#ifdef ERTS_SMP
- erts_smp_tsd_key_create(&match_pseudo_process_key,
+ erts_tsd_key_create(&match_pseudo_process_key,
"erts_match_pseudo_process_key");
- erts_smp_install_exit_handler(destroy_match_pseudo_process);
-#else
- match_pseudo_process = create_match_pseudo_process();
-#endif
+ erts_thr_install_exit_handler(destroy_match_pseudo_process);
}
void
@@ -484,7 +492,7 @@ erts_match_set_release_result(Process* c_p)
/* The trace control word. */
-static erts_smp_atomic32_t trace_control_word;
+static erts_atomic32_t trace_control_word;
/* This needs to be here, before the bif table... */
@@ -630,6 +638,18 @@ static DMCGuardBif guard_tab[] =
DBIF_ALL
},
{
+ am_map_get,
+ &map_get_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_is_map_key,
+ &is_map_key_2,
+ 2,
+ DBIF_ALL
+ },
+ {
am_bit_size,
&bit_size_1,
1,
@@ -923,7 +943,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj);
*/
BIF_RETTYPE db_get_trace_control_word(Process *p)
{
- Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word);
+ Uint32 tcw = (Uint32) erts_atomic32_read_acqb(&trace_control_word);
BIF_RET(erts_make_integer((Uint) tcw, p));
}
@@ -941,7 +961,7 @@ BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new)
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word,
+ old_tcw = (Uint32) erts_atomic32_xchg_relb(&trace_control_word,
(erts_aint32_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
@@ -1119,9 +1139,186 @@ error:
return NULL;
}
-/* This is used when tracing */
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr) {
- return db_match_set_lint(p, matchexpr, DCOMP_TRACE);
+/*
+ * Compare a matching term 'a' with a constructing term 'b' for equality.
+ *
+ * Returns true if 'b' is guaranteed to always construct
+ * the same term as 'a' has matched.
+ */
+static int db_match_eq_body(Eterm a, Eterm b, int const_mode)
+{
+ DECLARE_ESTACK(s);
+ Uint arity;
+ Eterm *ap, *bp;
+ const Eterm CONST_MODE_OFF = THE_NON_VALUE;
+
+ while (1) {
+ switch(b & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ if (!is_list(a))
+ return 0;
+ ESTACK_PUSH2(s, CDR(list_val(a)), CDR(list_val(b)));
+ a = CAR(list_val(a));
+ b = CAR(list_val(b));
+ continue; /* loop without pop */
+
+ case TAG_PRIMARY_BOXED:
+ if (is_tuple(b)) {
+ bp = tuple_val(b);
+ if (!const_mode) {
+ if (bp[0] == make_arityval(1) && is_tuple(bp[1])) {
+ b = bp[1]; /* double-tuple syntax */
+ }
+ else if (bp[0] == make_arityval(2) && bp[1] == am_const) {
+ ESTACK_PUSH(s, CONST_MODE_OFF);
+ const_mode = 1; /* {const, term()} syntax */
+ b = bp[2];
+ continue; /* loop without pop */
+ }
+ else
+ return 0; /* function call or invalid tuple syntax */
+ }
+ if (!is_tuple(a))
+ return 0;
+
+ ap = tuple_val(a);
+ bp = tuple_val(b);
+ if (ap[0] != bp[0])
+ return 0;
+ arity = arityval(ap[0]);
+ if (arity > 0) {
+ a = *(++ap);
+ b = *(++bp);
+ while(--arity) {
+ ESTACK_PUSH2(s, *(++ap), *(++bp));
+ }
+ continue; /* loop without pop */
+ }
+ }
+ else if (is_map(b)) {
+ /* We don't know what other pairs the matched map may contain */
+ return 0;
+ }
+ else if (!eq(a,b)) /* other boxed */
+ return 0;
+ break;
+
+ case TAG_PRIMARY_IMMED1:
+ if (a != b || a == am_Underscore || a == am_DollarDollar
+ || a == am_DollarUnderscore
+ || (const_mode && db_is_variable(a) >= 0)) {
+
+ return 0;
+ }
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "db_match_eq_body: "
+ "Bad object on ESTACK: 0x%bex\n", b);
+ }
+
+pop_next:
+ if (ESTACK_ISEMPTY(s))
+ break; /* done */
+
+ b = ESTACK_POP(s);
+ if (b == CONST_MODE_OFF) {
+ ASSERT(const_mode);
+ const_mode = 0;
+ goto pop_next;
+ }
+ a = ESTACK_POP(s);
+ }
+
+ DESTROY_ESTACK(s);
+ return 1;
+}
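+/*
+ * For example, the matched term pattern {'$1', foo} and the body term
+ * {{'$1', foo}} (double-tuple construction) compare equal here, while
+ * {const, {'$1', foo}} does not: in const mode '$1' denotes the literal
+ * atom '$1', not the variable binding.
+ */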
+
+/* This is used by select_replace */
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body)
+{
+ Eterm match_key;
+ Eterm* body_list;
+ Eterm single_body_term;
+ Eterm* single_body_term_tpl;
+ Eterm single_body_subterm;
+ Eterm single_body_subterm_key;
+ Eterm* single_body_subterm_key_tpl;
+ int const_mode;
+
+ if (!is_list(body)) {
+ return 0;
+ }
+
+ body_list = list_val(body);
+ if (CDR(body_list) != NIL) {
+ return 0;
+ }
+
+ single_body_term = CAR(body_list);
+ if (single_body_term == am_DollarUnderscore) {
+ /* same tuple is returned */
+ return 1;
+ }
+
+ if (!is_tuple(single_body_term)) {
+ return 0;
+ }
+
+ match_key = db_getkey(keypos, match);
+ if (!is_value(match_key)) {
+ /* can't get key out of match */
+ return 0;
+ }
+
+ single_body_term_tpl = tuple_val(single_body_term);
+ if (single_body_term_tpl[0] == make_arityval(2) &&
+ single_body_term_tpl[1] == am_const) {
+ /* {const, {"ets-tuple constant"}} */
+ single_body_subterm = single_body_term_tpl[2];
+ const_mode = 1;
+ }
+ else if (*single_body_term_tpl == make_arityval(1)) {
+ /* {{"ets-tuple construction"}} */
+ single_body_subterm = single_body_term_tpl[1];
+ const_mode = 0;
+ }
+ else {
+ /* not a tuple construction */
+ return 0;
+ }
+
+ single_body_subterm_key = db_getkey(keypos, single_body_subterm);
+ if (!is_value(single_body_subterm_key)) {
+ /* can't get key out of single body subterm */
+ return 0;
+ }
+
+ if (db_match_eq_body(match_key, single_body_subterm_key, const_mode)) {
+ /* tuple with same key is returned */
+ return 1;
+ }
+
+ if (const_mode) {
+ /* constant key did not match */
+ return 0;
+ }
+
+ if (!is_tuple(single_body_subterm_key)) {
+ /* can't possibly be an element instruction */
+ return 0;
+ }
+
+ single_body_subterm_key_tpl = tuple_val(single_body_subterm_key);
+ if (single_body_subterm_key_tpl[0] == make_arityval(3) &&
+ single_body_subterm_key_tpl[1] == am_element &&
+ single_body_subterm_key_tpl[3] == am_DollarUnderscore &&
+ single_body_subterm_key_tpl[2] == make_small(keypos))
+ {
+ /* {element, KeyPos, '$_'} */
+ return 1;
+ }
+
+ return 0;
}
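/*
 * Illustration (hypothetical spec): with keypos = 1, the body
 * [{{'$1', new_val}}] keeps the key since '$1' reappears in the key
 * position, whereas [{{new_key, '$2'}}] does not; ets:select_replace/2
 * rejects match specs in the latter class with badarg.
 */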
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
@@ -1289,7 +1486,7 @@ void db_initialize_util(void){
sizeof(DMCGuardBif),
(int (*)(const void *, const void *)) &cmp_guard_bif);
match_pseudo_process_init();
- erts_smp_atomic32_init_nob(&trace_control_word, 0);
+ erts_atomic32_init_nob(&trace_control_word, 0);
}
@@ -1356,7 +1553,7 @@ restart:
context.current_match < num_progs;
++context.current_match) { /* This loop is long,
too long */
- memset(heap.vars, 0, heap.size * sizeof(*heap.vars));
+ sys_memset(heap.vars, 0, heap.size * sizeof(*heap.vars));
t = context.matchexpr[context.current_match];
context.stack_used = 0;
structure_checked = 0;
@@ -1375,8 +1572,7 @@ restart:
if (is_flatmap(t)) {
num_iters = flatmap_get_size(flatmap_val(t));
if (!structure_checked) {
- DMC_PUSH(text, matchMap);
- DMC_PUSH(text, num_iters);
+ DMC_PUSH2(text, matchMap, num_iters);
}
structure_checked = 0;
for (i = 0; i < num_iters; ++i) {
@@ -1396,8 +1592,7 @@ restart:
}
goto error;
}
- DMC_PUSH(text, matchKey);
- DMC_PUSH(text, dmc_private_copy(&context, key));
+ DMC_PUSH2(text, matchKey, dmc_private_copy(&context, key));
{
int old_stack = ++(context.stack_used);
Eterm value = flatmap_get_values(flatmap_val(t))[i];
@@ -1425,8 +1620,7 @@ restart:
Eterm *kv;
num_iters = hashmap_size(t);
if (!structure_checked) {
- DMC_PUSH(text, matchMap);
- DMC_PUSH(text, num_iters);
+ DMC_PUSH2(text, matchMap, num_iters);
}
structure_checked = 0;
@@ -1452,8 +1646,7 @@ restart:
DESTROY_WSTACK(wstack);
goto error;
}
- DMC_PUSH(text, matchKey);
- DMC_PUSH(text, dmc_private_copy(&context, key));
+ DMC_PUSH2(text, matchKey, dmc_private_copy(&context, key));
{
int old_stack = ++(context.stack_used);
res = dmc_one_term(&context, &heap, &stack, &text,
@@ -1483,8 +1676,7 @@ restart:
num_iters = arityval(*tuple_val(t));
if (!structure_checked) { /* i.e. we did not
pop it */
- DMC_PUSH(text,matchTuple);
- DMC_PUSH(text,num_iters);
+ DMC_PUSH2(text, matchTuple, num_iters);
}
structure_checked = 0;
for (i = 1; i <= num_iters; ++i) {
@@ -1702,17 +1894,18 @@ error: /* Here is where we land when compilation failed. */
/*
** Free a match program (in a binary)
*/
-void erts_db_match_prog_destructor(Binary *bprog)
+int erts_db_match_prog_destructor(Binary *bprog)
{
MatchProg *prog;
if (bprog == NULL)
- return;
+ return 1;
prog = Binary2MatchProg(bprog);
if (prog->term_save != NULL) {
free_message_buffer(prog->term_save);
}
if (prog->saved_program_buf != NULL)
free_message_buffer(prog->saved_program_buf);
+ return 1;
}
void
@@ -1769,7 +1962,7 @@ Eterm db_prog_match(Process *c_p,
Eterm t;
Eterm *esp;
MatchVariable* variables;
- BeamInstr *cp;
+ ErtsCodeMFA *cp;
const UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
@@ -1834,7 +2027,8 @@ restart:
do_catch = 0;
fail_label = -1;
build_proc = psp;
- esdp->current_process = psp;
+ if (esdp)
+ esdp->current_process = psp;
#ifdef DEBUG
ASSERT(variables == mpsp->u.variables);
@@ -1976,7 +2170,7 @@ restart:
case matchEqFloat:
if (!is_float(*ep))
FAIL();
- if (memcmp(float_val(*ep) + 1, pc, sizeof(double)))
+ if (sys_memcmp(float_val(*ep) + 1, pc, sizeof(double)))
FAIL();
pc += TermWords(2);
++ep;
@@ -2171,7 +2365,7 @@ restart:
}
}
else {
- *esp = term;
+ *esp++ = term;
}
break;
case matchPushArrayAsList:
@@ -2326,32 +2520,27 @@ restart:
if (have_no_seqtrace(SEQ_TRACE_TOKEN(c_p)))
*esp++ = NIL;
else {
- Eterm sender = SEQ_TRACE_TOKEN_SENDER(c_p);
- Uint sender_sz = is_immed(sender) ? 0 : size_object(sender);
- ehp = HAllocX(build_proc, 6 + sender_sz, HEAP_XTRA);
- if (sender_sz) {
- sender = copy_struct(sender, sender_sz, &ehp, &MSO(build_proc));
- }
- *esp++ = make_tuple(ehp);
- ehp[0] = make_arityval(5);
- ehp[1] = SEQ_TRACE_TOKEN_FLAGS(c_p);
- ehp[2] = SEQ_TRACE_TOKEN_LABEL(c_p);
- ehp[3] = SEQ_TRACE_TOKEN_SERIAL(c_p);
- ehp[4] = sender;
- ehp[5] = SEQ_TRACE_TOKEN_LASTCNT(c_p);
- ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
- ASSERT(is_immed(ehp[1]));
- ASSERT(is_immed(ehp[2]));
- ASSERT(is_immed(ehp[3]));
- ASSERT(is_immed(ehp[5]));
- }
+ Eterm token;
+ Uint token_sz;
+
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_FLAGS(c_p)));
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_SERIAL(c_p)));
+ ASSERT(is_immed(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
+
+ token = SEQ_TRACE_TOKEN(c_p);
+ token_sz = size_object(token);
+
+ ehp = HAllocX(build_proc, token_sz, HEAP_XTRA);
+ *esp++ = copy_struct(token, token_sz, &ehp, &MSO(build_proc));
+ }
break;
case matchEnableTrace:
ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2366,9 +2555,9 @@ restart:
/* Always take over the tracer of the current process */
set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
if (tmpp == c_p)
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
else
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
esp[-1] = am_true;
}
}
@@ -2376,9 +2565,9 @@ restart:
case matchDisableTrace:
ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2393,9 +2582,9 @@ restart:
/* Always take over the tracer of the current process */
set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
if (tmpp == c_p)
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
else
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
esp[-1] = am_true;
}
}
@@ -2408,9 +2597,9 @@ restart:
ehp = HAllocX(build_proc, 4, HEAP_XTRA);
*esp++ = make_tuple(ehp);
ehp[0] = make_arityval(3);
- ehp[1] = cp[0];
- ehp[2] = cp[1];
- ehp[3] = make_small((Uint) cp[2]);
+ ehp[1] = cp->module;
+ ehp[2] = cp->function;
+ ehp[3] = make_small((Uint) cp->arity);
}
break;
case matchSilent:
@@ -2419,14 +2608,14 @@ restart:
if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT)
break;
if (*esp == am_true) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
else if (*esp == am_false) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
case matchTrace2:
@@ -2455,10 +2644,10 @@ restart:
ERTS_TRACER_CLEAR(&tracer);
break;
}
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACER_CLEAR(&tracer);
}
break;
@@ -2488,13 +2677,13 @@ restart:
if (tmpp == c_p) {
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
} else {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
(--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer,
d_flags, e_flags);
- erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
ERTS_TRACER_CLEAR(&tracer);
}
@@ -2503,7 +2692,8 @@ restart:
do_catch = 1;
if (in_flags & ERTS_PAM_COPY_RESULT) {
build_proc = c_p;
- esdp->current_process = c_p;
+ if (esdp)
+ esdp->current_process = c_p;
}
break;
case matchHalt:
@@ -2545,14 +2735,6 @@ success:
}
-/*
- * Convert a match program to a "magic" binary to return up to erlang
- */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp)
-{
- return erts_mk_magic_binary_term(hpp, &MSO(p), mp);
-}
-
DMCErrInfo *db_new_dmc_err_info(void)
{
DMCErrInfo *ret = erts_alloc(ERTS_ALC_T_DB_DMC_ERR_INFO,
@@ -2585,12 +2767,10 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei)
if (vnum >= 0)
erts_snprintf(buff,sizeof(buff)+20,tmp->error_string, vnum);
else
- strcpy(buff,tmp->error_string);
- sl = strlen(buff);
+ sys_strcpy(buff,tmp->error_string);
+ sl = sys_strlen(buff);
shp = HAlloc(p, sl * 2 + 5);
- sev = (tmp->severity == dmcWarning) ?
- am_atom_put("warning",7) :
- am_error;
+ sev = (tmp->severity == dmcWarning) ? am_warning : am_error;
tlist = buf_to_intlist(&shp, buff, sl, NIL);
tpl = TUPLE2(shp, sev, tlist);
shp += 3;
@@ -3101,9 +3281,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
}
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
ASSERT(u.pb != &tmp);
@@ -3111,6 +3289,10 @@ void db_cleanup_offheap_comp(DbTerm* obj)
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
ASSERT(u.pb != &tmp);
@@ -3266,13 +3448,6 @@ int db_has_variable(Eterm node) {
return 0;
}
-int erts_db_is_compiled_ms(Eterm term)
-{
- return (is_binary(term)
- && (thing_subtag(*binary_val(term)) == REFC_BINARY_SUBTAG)
- && IsMatchProgBinary((((ProcBin *) binary_val(term))->val)));
-}
-
/*
** Local (static) utilities.
*/
@@ -3383,20 +3558,17 @@ static DMCRet dmc_one_term(DMCContext *context,
return retRestart;
}
if (heap->vars[n].is_bound) {
- DMC_PUSH(*text,matchCmp);
- DMC_PUSH(*text,n);
+ DMC_PUSH2(*text, matchCmp, n);
} else { /* Not bound, bind! */
if (n >= heap->vars_used)
heap->vars_used = n + 1;
- DMC_PUSH(*text,matchBind);
- DMC_PUSH(*text,n);
+ DMC_PUSH2(*text, matchBind, n);
heap->vars[n].is_bound = 1;
}
} else if (c == am_Underscore) {
DMC_PUSH(*text, matchSkip);
} else { /* Any immediate value */
- DMC_PUSH(*text, matchEq);
- DMC_PUSH(*text, (Uint) c);
+ DMC_PUSH2(*text, matchEq, (Uint) c);
}
break;
case TAG_PRIMARY_LIST:
@@ -3409,9 +3581,8 @@ static DMCRet dmc_one_term(DMCContext *context,
switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
n = arityval(*tuple_val(c));
- DMC_PUSH(*text, matchPushT);
+ DMC_PUSH2(*text, matchPushT, n);
++(context->stack_used);
- DMC_PUSH(*text, n);
DMC_PUSH(*stack, c);
break;
case (_TAG_HEADER_MAP >> _TAG_PRIMARY_SIZE):
@@ -3419,9 +3590,8 @@ static DMCRet dmc_one_term(DMCContext *context,
n = flatmap_get_size(flatmap_val(c));
else
n = hashmap_size(c);
- DMC_PUSH(*text, matchPushM);
+ DMC_PUSH2(*text, matchPushM, n);
++(context->stack_used);
- DMC_PUSH(*text, n);
DMC_PUSH(*stack, c);
break;
case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
@@ -3446,8 +3616,7 @@ static DMCRet dmc_one_term(DMCContext *context,
break;
}
case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
- DMC_PUSH(*text,matchEqFloat);
- DMC_PUSH(*text, (Uint) float_val(c)[1]);
+ DMC_PUSH2(*text, matchEqFloat, (Uint) float_val(c)[1]);
#ifdef ARCH_64
DMC_PUSH(*text, (Uint) 0);
#else
@@ -3455,8 +3624,7 @@ static DMCRet dmc_one_term(DMCContext *context,
#endif
break;
default: /* BINARY, FUN, VECTOR, or EXTERNAL */
- DMC_PUSH(*text, matchEqBin);
- DMC_PUSH(*text, dmc_private_copy(context, c));
+ DMC_PUSH2(*text, matchEqBin, dmc_private_copy(context, c));
break;
}
break;
@@ -3506,8 +3674,7 @@ static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
emb->next = context->save;
context->save = emb;
}
- DMC_PUSH(*text,matchPushC);
- DMC_PUSH(*text,(Uint) tmp);
+ DMC_PUSH2(*text, matchPushC, (Uint)tmp);
if (++context->stack_used > context->stack_need)
context->stack_need = context->stack_used;
}
@@ -3645,8 +3812,7 @@ dmc_tuple(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
*constant = 1;
return retOk;
}
- DMC_PUSH(*text, matchMkTuple);
- DMC_PUSH(*text, nelems);
+ DMC_PUSH2(*text, matchMkTuple, nelems);
context->stack_used -= (nelems - 1);
*constant = 0;
return retOk;
@@ -3673,13 +3839,11 @@ dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
*constant = 1;
return retOk;
}
- DMC_PUSH(*text, matchPushC);
- DMC_PUSH(*text, dmc_private_copy(context, m->keys));
+ DMC_PUSH2(*text, matchPushC, dmc_private_copy(context, m->keys));
if (++context->stack_used > context->stack_need) {
context->stack_need = context->stack_used;
}
- DMC_PUSH(*text, matchMkFlatMap);
- DMC_PUSH(*text, nelems);
+ DMC_PUSH2(*text, matchMkFlatMap, nelems);
context->stack_used -= nelems;
*constant = 0;
return retOk;
@@ -3731,8 +3895,7 @@ dmc_map(DMCContext *context, DMCHeap *heap, DMC_STACK_TYPE(UWord) *text,
do_emit_constant(context, text, CDR(kv));
}
}
- DMC_PUSH(*text, matchMkHashMap);
- DMC_PUSH(*text, nelems);
+ DMC_PUSH2(*text, matchMkHashMap, nelems);
context->stack_used -= nelems;
DESTROY_WSTACK(wstack);
return retOk;
@@ -4892,7 +5055,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
ASSERT(j < x);
erts_snprintf(buff+1, sizeof(buff) - 1, "%u", (unsigned) j);
/* Yes, writing directly into terms, they ARE off heap */
- *p = erts_atom_put((byte *) buff, strlen(buff),
+ *p = erts_atom_put((byte *) buff, sys_strlen(buff),
ERTS_ATOM_ENC_LATIN1, 1);
}
++p;
@@ -5015,7 +5178,7 @@ BIF_RETTYPE match_spec_test_3(BIF_ALIST_3)
{
Eterm res;
#ifdef DMC_DEBUG
- if (BIF_ARG_3 == am_atom_put("dis",3)) {
+ if (BIF_ARG_3 == ERTS_MAKE_AM("dis")) {
test_disassemble_next = 1;
BIF_RET(am_true);
} else
@@ -5126,7 +5289,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
erts_free(ERTS_ALC_T_DB_TMP, arr);
}
erts_bin_free(mps);
- ret = TUPLE4(hp, am_atom_put("ok",2), res, flg, lint_res);
+ ret = TUPLE4(hp, am_ok, res, flg, lint_res);
}
return ret;
}
@@ -5168,8 +5331,9 @@ void db_free_tmp_uncompressed(DbTerm* obj)
}
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
- int all, DbTerm* obj, Eterm** hpp, Uint extra)
+ DbTerm* obj, Eterm** hpp, Uint extra)
{
+ enum erts_pam_run_flags flags;
Uint32 dummy;
Eterm res;
@@ -5177,9 +5341,13 @@ Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
obj = db_alloc_tmp_uncompressed(tb, obj);
}
+ flags = (hpp ?
+ ERTS_PAM_COPY_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE :
+ ERTS_PAM_TMP_RESULT | ERTS_PAM_CONTIGUOUS_TUPLE);
+
res = db_prog_match(c_p, c_p,
bprog, make_tuple(obj->tpl), NULL, 0,
- ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy);
+ flags, &dummy);
if (is_value(res) && hpp!=NULL) {
*hpp = HAlloc(c_p, extra);
@@ -5289,24 +5457,35 @@ void db_match_dis(Binary *bp)
case matchEqRef:
++t;
{
- RefThing *rt = (RefThing *) t;
+ Uint32 *num;
int ri;
- n = thing_arityval(rt->header);
- erts_printf("EqRef\t(%d) {", (int) n);
+
+ if (is_ordinary_ref_thing(t)) {
+ ErtsORefThing *rt = (ErtsORefThing *) t;
+ num = rt->num;
+ t += TermWords(ERTS_REF_THING_SIZE);
+ }
+ else {
+ ErtsMRefThing *mrt = (ErtsMRefThing *) t;
+ ASSERT(is_magic_ref_thing(t));
+ num = mrt->mb->refn;
+ t += TermWords(ERTS_MAGIC_REF_THING_SIZE);
+ }
+
+ erts_printf("EqRef\t(%d) {", (int) ERTS_REF_NUMBERS);
first = 1;
- for (ri = 0; ri < n; ++ri) {
+ for (ri = 0; ri < ERTS_REF_NUMBERS; ++ri) {
if (first)
first = 0;
else
erts_printf(", ");
#if defined(ARCH_64)
- erts_printf("0x%016bex", rt->data.ui[ri]);
+ erts_printf("0x%016bex", num[ri]);
#else
- erts_printf("0x%08bex", rt->data.ui[ri]);
+ erts_printf("0x%08bex", num[ri]);
#endif
}
}
- t += TermWords(REF_THING_SIZE);
erts_printf("}\n");
break;
case matchEqBig:
@@ -5337,7 +5516,7 @@ void db_match_dis(Binary *bp)
++t;
{
double num;
- memcpy(&num,t,sizeof(double));
+ sys_memcpy(&num,t,sizeof(double));
t += TermWords(2);
erts_printf("EqFloat\t%f\n", num);
}
@@ -5568,5 +5747,3 @@ void db_match_dis(Binary *bp)
}
#endif /* DMC_DEBUG */
-
-
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 4acedbfed0..6ec3b4f98f 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@
#include "global.h"
#include "erl_message.h"
+#include "erl_bif_unique.h"
/*#define HARDDEBUG 1*/
@@ -31,6 +32,7 @@
** DMC_DEBUG does NOT need DEBUG, but DEBUG needs DMC_DEBUG
*/
#define DMC_DEBUG 1
+#define ETS_DBG_FORCE_TRAP 1
#endif
/*
@@ -74,9 +76,6 @@ typedef struct db_term {
*/
} DbTerm;
-union db_table;
-typedef union db_table DbTable;
-
#define DB_MUST_RESIZE 1
#define DB_NEW_OBJECT 2
#define DB_INC_TRY_GROW 4
@@ -137,45 +136,57 @@ typedef struct db_table_method
Eterm slot,
Eterm* ret);
int (*db_select_chunk)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Sint chunk_size,
int reverse,
Eterm* ret);
int (*db_select)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
int reverse,
Eterm* ret);
int (*db_select_delete)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_delete_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_count)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_count_continue)(Process* p,
DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
+ int (*db_select_replace)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm tid,
+ Eterm pattern,
+ Eterm* ret);
+ int (*db_select_replace_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
- int (*db_delete_all_objects)(Process* p,
- DbTable* db /* [in out] */ );
+ SWord (*db_delete_all_objects)(Process* p, DbTable* db, SWord reds);
- int (*db_free_table)(DbTable* db /* [in out] */ );
- int (*db_free_table_continue)(DbTable* db); /* [in out] */
+ int (*db_free_empty_table)(DbTable* db);
+ SWord (*db_free_table_continue)(DbTable* db, SWord reds);
- void (*db_print)(int to,
+ void (*db_print)(fmtfn_t to,
void* to_arg,
int show,
DbTable* tb /* [in out] */ );
@@ -183,7 +194,6 @@ typedef struct db_table_method
void (*db_foreach_offheap)(DbTable* db, /* [in out] */
void (*func)(ErlOffHeap *, void *),
void *arg);
- void (*db_check_table)(DbTable* tb);
/* Lookup a dbterm for updating. Return false if not found. */
int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj,
@@ -197,11 +207,30 @@ typedef struct db_table_method
} DbTableMethod;
typedef struct db_fixation {
- Eterm pid;
+ /* Node in fixed_tabs list */
+ struct {
+ struct db_fixation *next, *prev;
+ Binary* btid;
+ } tabs;
+
+ /* Node in fixing_procs tree */
+ struct {
+ struct db_fixation *left, *right, *parent;
+ int is_red;
+ Process* p;
+ } procs;
+
+ /* Number of fixations on table from procs.p
+ * Protected by table write lock or read lock + fixlock
+ */
Uint counter;
- struct db_fixation *next;
} DbFixation;
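+/* A fixation is thus reachable from both sides: a table can walk all of
+ * its fixations through the tabs list, and a process can find its
+ * fixations through the procs tree, so either party can unlink on table
+ * deletion or process exit without scanning the other's structures. */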
+typedef struct {
+ DbTable *next;
+ DbTable *prev;
+} DbTableList;
+
/*
* This structure contains data for all different types of database
* tables. Note that these fields must match the same fields
@@ -211,56 +240,58 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_refc_t ref; /* fixation counter */
-#ifdef ERTS_SMP
- erts_smp_rwmtx_t rwlock; /* rw lock on table */
- erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
+ erts_refc_t refc; /* reference count of table struct */
+ erts_refc_t fix_count;/* fixation counter */
+ DbTableList all;
+ DbTableList owned;
+ erts_rwmtx_t rwlock; /* rw lock on table */
+ erts_mtx_t fixlock; /* Protects fixing_procs and time */
int is_thread_safe; /* No fine locking inside table needed */
Uint32 type; /* table type, *read only* after creation */
-#endif
Eterm owner; /* Pid of the creator */
Eterm heir; /* Pid of the heir */
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*)) */
Uint64 heir_started_interval; /* To further identify the heir */
Eterm the_name; /* an atom */
- Eterm id; /* atom | integer */
+ Binary *btid;
DbTableMethod* meth; /* table methods */
- erts_smp_atomic_t nitems; /* Total number of items in table */
- erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
+ erts_atomic_t nitems; /* Total number of items in table */
+ erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
struct { /* Last fixation time */
ErtsMonotonicTime monotonic;
ErtsMonotonicTime offset;
} time;
- DbFixation* fixations; /* List of processes who have done safe_fixtable,
+ DbFixation* fixing_procs; /* Tree of processes that have done safe_fixtable,
"local" fixations not included. */
/* All 32-bit fields */
Uint32 status; /* bit masks defined below */
- int slot; /* slot index in meta_main_tab */
int keypos; /* defaults to 1 */
int compress;
+
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_atomic_t dbg_force_trap; /* &1 force enabled, &2 trap this call */
+#endif
} DbTableCommon;
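/* With ETS_DBG_FORCE_TRAP defined (debug builds only), dbg_force_trap lets
 * ETS operations be forced to trap: bit &1 enables the forcing and bit &2
 * marks whether the current call should trap, exercising yield and
 * continuation paths that are otherwise timing dependent to reach. */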
/* These are status bit patterns */
-#define DB_NORMAL (1 << 0)
-#define DB_PRIVATE (1 << 1)
-#define DB_PROTECTED (1 << 2)
-#define DB_PUBLIC (1 << 3)
-#define DB_BAG (1 << 4)
-#define DB_SET (1 << 5)
-/*#define DB_LHASH (1 << 6)*/
-#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */
-#define DB_DUPLICATE_BAG (1 << 8)
-#define DB_ORDERED_SET (1 << 9)
-#define DB_DELETE (1 << 10) /* table is being deleted */
-#define DB_FREQ_READ (1 << 11)
-
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
+#define DB_FREQ_READ (1 << 9) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 10)
+#define DB_BUSY (1 << 11)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_refc_read(&(T)->common.ref,0))
+#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
/*
@@ -355,7 +386,8 @@ Eterm db_add_counter(Eterm** hpp, Wterm counter, Eterm incr);
Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags);
Binary *db_match_set_compile(Process *p, Eterm matchexpr,
Uint flags);
-void erts_db_match_prog_destructor(Binary *);
+int db_match_keeps_key(int keypos, Eterm match, Eterm guard, Eterm body);
+int erts_db_match_prog_destructor(Binary *);
typedef struct match_prog {
ErlHeapFragment *term_save; /* Only if needed, a list of message
@@ -439,7 +471,7 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
/* Returns newly allocated MatchProg binary with refc == 0*/
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
- int all, DbTerm* obj, Eterm** hpp, Uint extra);
+ DbTerm* obj, Eterm** hpp, Uint extra);
Eterm db_prog_match(Process *p, Process *self,
Binary *prog, Eterm term,
@@ -456,29 +488,54 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei);
void db_free_dmc_err_info(DMCErrInfo *ei);
/* Completely free's an error info structure, including all recorded
errors */
-Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp);
-/* Convert a match program to a erlang "magic" binary to be returned to userspace,
- increments the reference counter. */
-int erts_db_is_compiled_ms(Eterm term);
+
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary(Eterm term);
+ERTS_GLB_INLINE Binary *erts_db_get_match_prog_binary_unchecked(Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * Convert a match program to a "magic" ref to return up to erlang
+ */
+ERTS_GLB_INLINE Eterm erts_db_make_match_prog_ref(Process *p, Binary *mp, Eterm **hpp)
+{
+ return erts_mk_magic_ref(hpp, &MSO(p), mp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary_unchecked(Eterm term)
+{
+ Binary *bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ ASSERT((ERTS_MAGIC_BIN_DESTRUCTOR(bp) == erts_db_match_prog_destructor));
+ return bp;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_db_get_match_prog_binary(Eterm term)
+{
+ Binary *bp;
+ if (!is_internal_magic_ref(term))
+ return NULL;
+ bp = erts_magic_ref2bin(term);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bp) != erts_db_match_prog_destructor)
+ return NULL;
+ return bp;
+}
+
+#endif
/*
** Convenience when compiling into Binary structures
*/
#define IsMatchProgBinary(BP) \
- (((BP)->flags & BIN_FLAG_MAGIC) \
+ (((BP)->intern.flags & BIN_FLAG_MAGIC) \
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
(ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
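/* Sketch of intended use: a term that may carry a compiled match spec is
 * first validated with erts_db_get_match_prog_binary(), and the returned
 * Binary is then turned into a MatchProg via Binary2MatchProg() before
 * being handed to db_prog_match(). */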
-/*
-** Debugging
-*/
-#ifdef HARDDEBUG
-void db_check_tables(void); /* in db.c */
-#define CHECK_TABLES() db_check_tables()
-#else
-#define CHECK_TABLES()
-#endif
#endif /* _DB_UTIL_H */
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 3e3bfa03a2..db78378257 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -60,10 +60,10 @@ static const char dashes[PTR_SIZE+3] = {
void pps(Process*, Eterm*);
void ptd(Process*, Eterm);
-void paranoid_display(int, void*, Process*, Eterm);
+void paranoid_display(fmtfn_t, void*, Process*, Eterm);
static int dcount;
-static int pdisplay1(int to, void *to_arg, Process* p, Eterm obj);
+static int pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj);
void ptd(Process* p, Eterm x)
{
@@ -77,14 +77,14 @@ void ptd(Process* p, Eterm x)
*/
void
-paranoid_display(int to, void *to_arg, Process* p, Eterm obj)
+paranoid_display(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
{
dcount = 100000;
pdisplay1(to, to_arg, p, obj);
}
static int
-pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
+pdisplay1(fmtfn_t to, void *to_arg, Process* p, Eterm obj)
{
int i, k;
Eterm* nobj;
@@ -130,7 +130,7 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
Uint32 *ref_num;
erts_print(to, to_arg, "#Ref<%lu", ref_channel_no(obj));
ref_num = ref_numbers(obj);
- for (i = ref_no_of_numbers(obj)-1; i >= 0; i--)
+ for (i = ref_no_numbers(obj)-1; i >= 0; i--)
erts_print(to, to_arg, ",%lu", ref_num[i]);
erts_print(to, to_arg, ">");
break;
@@ -201,7 +201,7 @@ pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
void
pps(Process* p, Eterm* stop)
{
- int to = ERTS_PRINT_STDOUT;
+ fmtfn_t to = ERTS_PRINT_STDOUT;
void *to_arg = NULL;
Eterm* sp = STACK_START(p) - 1;
@@ -404,15 +404,16 @@ void verify_process(Process *p)
erts_exit(ERTS_ERROR_EXIT,"Wild pointer found in " name " of %T!\n",p->common.id); }
- ErtsMessage* mp = p->msg.first;
-
VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->common.id));
- while (mp != NULL) {
- VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp));
- VERIFY_ETERM("message token",ERL_MESSAGE_TOKEN(mp));
- mp = mp->next;
- }
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp)) {
+ VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp));
+ VERIFY_ETERM("message token",ERL_MESSAGE_TOKEN(mp));
+ }
+ });
erts_check_stack(p);
erts_check_heap(p);
@@ -532,16 +533,15 @@ static void print_process_memory(Process *p)
erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
PTR_SIZE, "PCB", dashes, dashes, dashes, dashes);
- if (p->msg.first != NULL) {
- ErtsMessage* mp;
- erts_printf(" Message Queue:\n");
- mp = p->msg.first;
- while (mp != NULL) {
- erts_printf("| 0x%0*lx | 0x%0*lx |\n",PTR_SIZE,
- ERL_MESSAGE_TERM(mp),PTR_SIZE,ERL_MESSAGE_TOKEN(mp));
- mp = mp->next;
- }
- }
+
+ erts_printf(" Message Queue:\n");
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ erts_printf("| 0x%0*lx | 0x%0*lx |\n",PTR_SIZE,
+ ERL_MESSAGE_TERM(mp),PTR_SIZE,ERL_MESSAGE_TOKEN(mp));
+ });
if (p->dictionary != NULL) {
int n = ERTS_PD_SIZE(p->dictionary);
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
new file mode 100644
index 0000000000..20299ff604
--- /dev/null
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -0,0 +1,85 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2016-2018. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Static declaration of BIFs that should execute on dirty schedulers.
+#
+# <dirty-bif-decl> ::= <type> <bif>
+# <bif> ::= <module> ":" <name> "/" <arity>
+# <type> ::= dirty-cpu | dirty-io | dirty-cpu-test | dirty-io-test
+#
+# When dirty scheduler support is available, a BIF declared with the
+# 'dirty-cpu' type will unconditionally execute on a dirty CPU scheduler,
+# and a BIF declared with the type 'dirty-io' will unconditionally execute
+# on a dirty IO scheduler. When dirty scheduler support is not available
+# all BIFs will of course execute on normal schedulers.
+#
+# When the emulator has been configured with the debug option
+# '--enable-dirty-schedulers-test', BIFs with the types 'dirty-cpu-test',
+# and 'dirty-io-test' will unconditionally execute on dirty schedulers.
+# When this debug option has not been enabled, these BIFs will be executed
+# on normal schedulers.
+#
+# BIFs marked as 'ubif' in ./bif.tab will be ignored, i.e., will always
+# execute on normal schedulers.
+#
+
+# --- Dirty BIFs ---
+
+dirty-cpu erts_debug:dirty_cpu/2
+dirty-io erts_debug:dirty_io/2
+
+# Note: lcnt_control/1 doesn't need to be dirty; only lcnt_control/2 is declared below.
+dirty-cpu erts_debug:lcnt_control/2
+dirty-cpu erts_debug:lcnt_collect/0
+dirty-cpu erts_debug:lcnt_clear/0
+
+# --- TEST of Dirty BIF functionality ---
+# Functions below will execute on dirty schedulers when the emulator has
+# been configured for testing dirty schedulers. This is used for test
+# and debug purposes only. We really do *not* want to execute these
+# on dirty schedulers on a real system.
+
+dirty-cpu-test erlang:'++'/2
+dirty-cpu-test erlang:append/2
+dirty-cpu-test erlang:iolist_size/1
+dirty-cpu-test erlang:make_tuple/2
+dirty-cpu-test erlang:make_tuple/3
+dirty-cpu-test erlang:append_element/2
+dirty-cpu-test erlang:insert_element/3
+dirty-cpu-test erlang:delete_element/2
+dirty-cpu-test erlang:atom_to_list/1
+dirty-cpu-test erlang:list_to_atom/1
+dirty-cpu-test erlang:list_to_existing_atom/1
+dirty-cpu-test erlang:integer_to_list/1
+dirty-cpu-test erlang:string_to_integer/1
+dirty-cpu-test erlang:list_to_integer/1
+dirty-cpu-test erlang:list_to_integer/2
+dirty-cpu-test erlang:float_to_list/1
+dirty-cpu-test erlang:float_to_list/2
+dirty-cpu-test erlang:float_to_binary/1
+dirty-cpu-test erlang:float_to_binary/2
+dirty-cpu-test erlang:string_to_float/1
+dirty-cpu-test erlang:list_to_float/1
+dirty-cpu-test erlang:binary_to_float/1
+dirty-cpu-test erlang:tuple_to_list/1
+dirty-cpu-test erlang:list_to_tuple/1
+dirty-cpu-test erlang:display/1
+dirty-cpu-test erlang:display_string/1
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 97a69140c3..d5379a40d5 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +40,6 @@
#include "erl_drv_nif.h"
#include <stdlib.h>
-#include <sys/types.h> /* ssize_t */
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
#ifndef STATIC_ERLANG_DRIVER
@@ -48,24 +47,6 @@
#define ERL_DRIVER_TYPES_ONLY
#define WIN32_DYNAMIC_ERL_DRIVER
#endif
-/*
- * This structure can be cast to a WSABUF structure.
- */
-typedef struct _SysIOVec {
- unsigned long iov_len;
- char* iov_base;
-} SysIOVec;
-#else /* Unix */
-# ifdef HAVE_SYS_UIO_H
-# include <sys/types.h>
-# include <sys/uio.h>
-typedef struct iovec SysIOVec;
-# else
-typedef struct {
- char* iov_base;
- size_t iov_len;
-} SysIOVec;
-# endif
#endif
#ifndef EXTERN
@@ -76,11 +57,10 @@ typedef struct {
# endif
#endif
-/* Values for mode arg to driver_select() */
-#define ERL_DRV_READ (1 << 0)
-#define ERL_DRV_WRITE (1 << 1)
-#define ERL_DRV_USE (1 << 2)
-#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (1 << 3))
+#define ERL_DRV_READ ((int)ERL_NIF_SELECT_READ)
+#define ERL_DRV_WRITE ((int)ERL_NIF_SELECT_WRITE)
+#define ERL_DRV_USE ((int)ERL_NIF_SELECT_STOP)
+#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (ERL_DRV_USE << 1))
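+/* ERL_DRV_USE_NO_CALLBACK keeps the ERL_DRV_USE bit set and adds one extra
+ * bit; it is meant for deselecting without having the stop_select callback
+ * invoked. */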
/* Old deprecated */
#define DO_READ ERL_DRV_READ
@@ -168,21 +148,6 @@ typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
-#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
-struct erl_drv_event_data {
- short events;
- short revents;
-};
-#endif
-typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-
-/*
- * A driver monitor
- */
-typedef struct {
- unsigned char data[sizeof(void *)*4];
-} ErlDrvMonitor;
-
typedef struct {
unsigned long megasecs;
unsigned long secs;
@@ -297,10 +262,7 @@ typedef struct erl_drv_entry {
unsigned int *flags); /* Works mostly like 'control',
a synchronous
call into the driver. */
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
- /* Called when an event selected by
- driver_event() has occurred */
+ void (*unused_event_callback)(void);
int extended_marker; /* ERL_DRV_EXTENDED_MARKER */
int major_version; /* ERL_DRV_EXTENDED_MAJOR_VERSION */
int minor_version; /* ERL_DRV_EXTENDED_MINOR_VERSION */
@@ -367,8 +329,6 @@ EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port,
ErlDrvSizeT *high);
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
-EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
- ErlDrvEventData event_data);
EXTERN int driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len);
EXTERN int driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen,
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 6ec5fbb895..31b4817fb1 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -49,6 +49,21 @@ typedef enum {
ERL_DIRTY_JOB_IO_BOUND = 2
} ErlDirtyJobFlags;
+/* Values for enif_select AND mode arg for driver_select() */
+enum ErlNifSelectFlags {
+ ERL_NIF_SELECT_READ = (1 << 0),
+ ERL_NIF_SELECT_WRITE = (1 << 1),
+ ERL_NIF_SELECT_STOP = (1 << 2)
+};
+
+/*
+ * A driver monitor
+ */
+typedef struct {
+ unsigned char data[sizeof(void *)*4];
+} ErlDrvMonitor;
+
+
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
# undef SIZEOF_CHAR
@@ -69,10 +84,6 @@ typedef enum {
# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
# undef SIZEOF_LONG_LONG
#endif
-#ifdef HALFWORD_HEAP_EMULATOR
-# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
-# undef HALFWORD_HEAP_EMULATOR
-#endif
#include "erl_int_sizes_config.h"
#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
# error SIZEOF_CHAR mismatch
@@ -133,8 +144,25 @@ typedef signed int ErlNapiSInt;
#define ERTS_NAPI_USEC__ 2
#define ERTS_NAPI_NSEC__ 3
-#endif /* __ERL_DRV_NIF_H__ */
-
-
-
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+/*
+ * This structure can be cast to a WSABUF structure.
+ */
+typedef struct _SysIOVec {
+ unsigned long iov_len;
+ char* iov_base;
+} SysIOVec;
+#else /* Unix */
+# include <sys/types.h>
+# ifdef HAVE_SYS_UIO_H
+# include <sys/uio.h>
+typedef struct iovec SysIOVec;
+# else
+typedef struct {
+ char* iov_base;
+ size_t iov_len;
+} SysIOVec;
+# endif
+#endif
+#endif /* __ERL_DRV_NIF_H__ */
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 92edce5176..c5dbc87dee 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,10 +50,12 @@ fatal_error(int err, char *func)
#define ERL_DRV_TSD_EXTRA 10
#define ERL_DRV_INVALID_TSD_KEY INT_MAX
-#ifdef USE_THREADS
struct ErlDrvMutex_ {
ethr_mutex mtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -64,6 +66,9 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_ref_t lcnt;
+#endif
char *name;
};
@@ -79,10 +84,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-#else /* USE_THREADS */
-static Uint tsd_len;
-static void **tsd;
-#endif
static ErlDrvTSDKey next_tsd_key;
static ErlDrvTSDKey max_used_tsd_key;
@@ -91,7 +92,6 @@ static char **used_tsd_keys;
static erts_mtx_t tsd_mtx;
static char *no_name;
-#ifdef USE_THREADS
static void
thread_exit_handler(void)
@@ -116,21 +116,15 @@ erl_drv_thread_wrapper(void *vdtid)
return (*dtid->func)(dtid->arg);
}
-#endif
void erl_drv_thr_init(void)
{
int i;
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
fatal_error(res, "erl_drv_thr_init()");
-#else
- tsd_len = 0;
- tsd = NULL;
-#endif
no_name = "unknown";
next_tsd_key = 0;
@@ -140,19 +134,19 @@ void erl_drv_thr_init(void)
sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
used_tsd_keys[i] = NULL;
- erts_mtx_init(&tsd_mtx, "drv_tsd");
+ erts_mtx_init(&tsd_mtx, "drv_tsd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
/*
* These functions implement the driver thread interface in erl_driver.h.
* NOTE: Only use this interface from drivers. From within the emulator use
- * either the erl_threads.h, the erl_smp.h or the ethread.h interface.
+ * either the erl_threads.h or the ethread.h interface.
*/
ErlDrvMutex *
erl_drv_mutex_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX,
(sizeof(ErlDrvMutex)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -161,79 +155,83 @@ erl_drv_mutex_create(char *name)
opt.posix_compliant = 1;
if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
- dmtx = NULL;
+ return NULL;
}
- else if (!name)
- dmtx->name = no_name;
- else {
+ if (name) {
dmtx->name = ((char *) dmtx) + sizeof(ErlDrvMutex);
sys_strcpy(dmtx->name, name);
+ } else {
+ dmtx->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&dmtx->lcnt, dmtx->name, NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return dmtx;
-#else
- return (ErlDrvMutex *) NULL;
-#endif
}
void
erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
- int res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&dmtx->lcnt);
+#endif
+ res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_mutex_destroy()");
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
-#endif
}
char *
erl_drv_mutex_name(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
return dmtx ? dmtx->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
+ int res;
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_trylock()");
- return ethr_mutex_trylock(&dmtx->mtx);
-#else
- return 0;
+ res = ethr_mutex_trylock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&dmtx->lcnt, res);
#endif
+ return res;
}
void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_lock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+#endif
ethr_mutex_lock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&dmtx->lcnt);
#endif
}
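/*
 * Lock-count pattern used throughout this file: erts_lcnt_lock() records
 * the attempt before possibly blocking, erts_lcnt_lock_post() confirms the
 * acquisition afterwards, and trylock results are reported through
 * erts_lcnt_trylock(); all of it compiles away when
 * ERTS_ENABLE_LOCK_COUNT is not defined.
 */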
void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_unlock()");
- ethr_mutex_unlock(&dmtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
#endif
+ ethr_mutex_unlock(&dmtx->mtx);
}
ErlDrvCond *
erl_drv_cond_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND,
(sizeof(ErlDrvCond)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -252,176 +250,179 @@ erl_drv_cond_create(char *name)
}
}
return dcnd;
-#else
- return (ErlDrvCond *) NULL;
-#endif
}
void
erl_drv_cond_destroy(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_cond_destroy()");
erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
-#endif
}
char *
erl_drv_cond_name(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
return dcnd ? dcnd->name : NULL;
-#else
- return NULL;
-#endif
}
void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_signal()");
ethr_cond_signal(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_broadcast()");
ethr_cond_broadcast(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dcnd || !dmtx) {
fatal_error(EINVAL, "erl_drv_cond_wait()");
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&dmtx->lcnt);
+#endif
while (1) {
int res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
- if (res == 0)
+ if (res == 0) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&dmtx->lcnt);
+ erts_lcnt_lock_post(&dmtx->lcnt);
+#endif
break;
+ }
}
-#endif
}
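/* From the lock counter's point of view the mutex is released for the
 * whole wait and re-acquired on wakeup, mirroring what ethr_cond_wait()
 * does with the underlying mutex. */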
ErlDrvRWLock *
erl_drv_rwlock_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK,
(sizeof(ErlDrvRWLock)
+ (name ? sys_strlen(name) + 1 : 0)));
if (drwlck) {
if (ethr_rwmutex_init(&drwlck->rwmtx) != 0) {
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
- drwlck = NULL;
+ return NULL;
}
- else if (!name)
- drwlck->name = no_name;
- else {
+ if (name) {
drwlck->name = ((char *) drwlck) + sizeof(ErlDrvRWLock);
sys_strcpy(drwlck->name, name);
+ } else {
+ drwlck->name = no_name;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_ref_x(&drwlck->lcnt, drwlck->name, NIL,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
}
return drwlck;
-#else
- return (ErlDrvRWLock *) NULL;
-#endif
}
void
erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
- int res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
+ int res;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_uninstall(&drwlck->lcnt);
+#endif
+ res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_rwlock_destroy()");
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
-#endif
}
char *
erl_drv_rwlock_name(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
return drwlck ? drwlck->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
- return ethr_rwmutex_tryrlock(&drwlck->rwmtx);
-#else
- return 0;
+ res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
+ return res;
}
void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
+#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
#endif
}
void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
- ethr_rwmutex_runlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
+ ethr_rwmutex_runlock(&drwlck->rwmtx);
}
int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
+ int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
- return ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
-#else
- return 0;
+ res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
+ return res;
}
void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
+#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&drwlck->lcnt);
#endif
}
void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
- ethr_rwmutex_rwunlock(&drwlck->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
+ ethr_rwmutex_rwunlock(&drwlck->rwmtx);
}
int
@@ -438,7 +439,7 @@ erl_drv_tsd_key_create(char *name, ErlDrvTSDKey *key)
name_copy = no_name;
else {
name_copy = erts_alloc_fnf(ERTS_ALC_T_DRV_TSD,
- sizeof(char)*(strlen(name) + 1));
+ sizeof(char)*(sys_strlen(name) + 1));
if (!name_copy) {
*key = -1;
return ENOMEM;
@@ -515,20 +516,13 @@ erl_drv_tsd_key_destroy(ErlDrvTSDKey key)
}
-#ifdef USE_THREADS
#define ERL_DRV_TSD__ (dtid->tsd)
#define ERL_DRV_TSD_LEN__ (dtid->tsd_len)
-#else
-#define ERL_DRV_TSD__ (tsd)
-#define ERL_DRV_TSD_LEN__ (tsd_len)
-#endif
void
erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self();
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_set()");
@@ -556,15 +550,11 @@ erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
void *
erl_drv_tsd_get(ErlDrvTSDKey key)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_get()");
-#ifdef USE_THREADS
if (!dtid)
return NULL;
-#endif
if (ERL_DRV_TSD_LEN__ <= key)
return NULL;
return ERL_DRV_TSD__[key];
@@ -599,7 +589,6 @@ erl_drv_thread_create(char *name,
void* arg,
ErlDrvThreadOpts *opts)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid;
ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
@@ -641,27 +630,19 @@ erl_drv_thread_create(char *name,
*tid = (ErlDrvTid) dtid;
return 0;
-#else
- return ENOTSUP;
-#endif
}
char *
erl_drv_thread_name(ErlDrvTid tid)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
return dtid ? dtid->name : NULL;
-#else
- return NULL;
-#endif
}
ErlDrvTid
erl_drv_thread_self(void)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (!dtid) {
int res;
@@ -680,15 +661,11 @@ erl_drv_thread_self(void)
fatal_error(res, "erl_drv_thread_self()");
}
return (ErlDrvTid) dtid;
-#else
- return (ErlDrvTid) NULL;
-#endif
}
int
erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1;
struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2;
@@ -702,28 +679,22 @@ erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
: !ethr_equal_tids(dtid1->tid, dtid2->tid));
return res;
-#else
- return 1;
-#endif
}
void
erl_drv_thread_exit(void *res)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (dtid && dtid->drv_thr) {
ethr_thr_exit(res);
fatal_error(0, "erl_drv_thread_exit()");
}
-#endif
fatal_error(EACCES, "erl_drv_thread_exit()");
}
int
erl_drv_thread_join(ErlDrvTid tid, void **respp)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
@@ -736,12 +707,9 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp)
if (res == 0)
erts_free(ERTS_ALC_T_DRV_TID, dtid);
return res;
-#else
- return ENOTSUP;
-#endif
}
-#if defined(__DARWIN__) && defined(USE_THREADS) && defined(ERTS_SMP)
+#if defined(__DARWIN__)
extern int erts_darwin_main_thread_pipe[2];
extern int erts_darwin_main_thread_result_pipe[2];
@@ -782,7 +750,7 @@ erl_drv_steal_main_thread(char *name,
+ (name ? sys_strlen(name) + 1 : 0)));
if (!dtid)
return ENOMEM;
- memset(dtid,0,sizeof(ErlDrvTid_));
+ sys_memset(dtid,0,sizeof(ErlDrvTid_));
dtid->tid = (void * ) -1;
dtid->drv_thr = 1;
dtid->func = func;
@@ -795,8 +763,8 @@ erl_drv_steal_main_thread(char *name,
*tid = NULL;
/* Ignore options and name... */
- memcpy(buff,&func,sizeof(void* (*)(void*)));
- memcpy(buff + sizeof(void* (*)(void*)),&arg,sizeof(void *));
+ sys_memcpy(buff,&func,sizeof(void* (*)(void*)));
+ sys_memcpy(buff + sizeof(void* (*)(void*)),&arg,sizeof(void *));
write(erts_darwin_main_thread_pipe[1],buff,buff_sz);
return 0;
}
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index c639ba623f..9c866250bb 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,14 +30,16 @@
static Hash erts_fun_table;
-#include "erl_smp.h"
+#ifdef HIPE
+# include "hipe_mode_switch.h"
+#endif
-static erts_smp_rwmtx_t erts_fun_table_lock;
+static erts_rwmtx_t erts_fun_table_lock;
-#define erts_fun_read_lock() erts_smp_rwmtx_rlock(&erts_fun_table_lock)
-#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
-#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
-#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
+#define erts_fun_read_lock() erts_rwmtx_rlock(&erts_fun_table_lock)
+#define erts_fun_read_unlock() erts_rwmtx_runlock(&erts_fun_table_lock)
+#define erts_fun_write_lock() erts_rwmtx_rwlock(&erts_fun_table_lock)
+#define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock)
static HashValue fun_hash(ErlFunEntry* obj);
static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -49,18 +51,19 @@ static void fun_free(ErlFunEntry* obj);
* to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
* as an illegal arity when attempting to call a fun.
*/
-static BeamInstr unloaded_fun_code[3] = {NIL, -1, 0};
-static BeamInstr* unloaded_fun = unloaded_fun_code + 2;
+static BeamInstr unloaded_fun_code[4] = {NIL, NIL, -1, 0};
+static BeamInstr* unloaded_fun = unloaded_fun_code + 3;
void
erts_init_fun_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
+ erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
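
The lock discipline behind the erts_fun_read_lock()/erts_fun_write_lock() macros above, sketched with pthread read-write locks as a stand-in for erts_rwmtx_t (the FREQUENT_READ optimization selected above has no direct pthread equivalent):

    #include <pthread.h>

    /* Many concurrent readers (fun lookups), exclusive writers
     * (insert/erase). pthread rwlocks stand in for erts_rwmtx_t. */
    static pthread_rwlock_t fun_table_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void fun_read_lock(void)    { pthread_rwlock_rdlock(&fun_table_lock); }
    static void fun_read_unlock(void)  { pthread_rwlock_unlock(&fun_table_lock); }
    static void fun_write_lock(void)   { pthread_rwlock_wrlock(&fun_table_lock); }
    static void fun_write_unlock(void) { pthread_rwlock_unlock(&fun_table_lock); }
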
@@ -74,7 +77,7 @@ erts_init_fun_table(void)
}
void
-erts_fun_info(int to, void *to_arg)
+erts_fun_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -179,13 +182,11 @@ void
erts_erase_fun_entry(ErlFunEntry* fe)
{
erts_fun_write_lock();
-#ifdef ERTS_SMP
/*
* We have to check refc again since someone might have looked up
* the fun entry and incremented refc after last check.
*/
if (erts_refc_dectest(&fe->refc, -1) <= 0)
-#endif
{
if (fe->address != unloaded_fun)
erts_exit(ERTS_ERROR_EXIT,
@@ -217,8 +218,12 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end)
if (start <= addr && addr < end) {
fe->pend_purge_address = addr;
- ERTS_SMP_WRITE_MEMORY_BARRIER;
+ ERTS_THR_WRITE_MEMORY_BARRIER;
fe->address = unloaded_fun;
+#ifdef HIPE
+ fe->pend_purge_native_address = fe->native_address;
+ hipe_set_closure_stub(fe);
+#endif
erts_purge_state_add_fun(fe);
}
b = b->next;
@@ -234,9 +239,12 @@ erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
- if (fe->address == unloaded_fun)
+ if (fe->address == unloaded_fun) {
fe->address = fe->pend_purge_address;
- fe->pend_purge_address = NULL;
+#ifdef HIPE
+ fe->native_address = fe->pend_purge_native_address;
+#endif
+ }
}
}
@@ -245,8 +253,12 @@ erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no)
{
Uint ix;
- for (ix = 0; ix < no; ix++)
+ for (ix = 0; ix < no; ix++) {
funs[ix]->pend_purge_address = NULL;
+#ifdef HIPE
+ funs[ix]->pend_purge_native_address = NULL;
+#endif
+ }
}
void
@@ -257,14 +269,17 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
fe->pend_purge_address = NULL;
+#ifdef HIPE
+ fe->pend_purge_native_address = NULL;
+#endif
if (erts_refc_dectest(&fe->refc, 0) == 0)
erts_erase_fun_entry(fe);
}
- ERTS_SMP_WRITE_MEMORY_BARRIER;
+ ERTS_THR_WRITE_MEMORY_BARRIER;
}
void
-erts_dump_fun_entries(int to, void *to_arg)
+erts_dump_fun_entries(fmtfn_t to, void *to_arg)
{
int limit;
HashBucket** bucket;
@@ -325,6 +340,7 @@ fun_alloc(ErlFunEntry* template)
obj->pend_purge_address = NULL;
#ifdef HIPE
obj->native_address = NULL;
+ obj->pend_purge_native_address = NULL;
#endif
return obj;
}
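
The purge protocol that the erl_fun.c hunks above extend to HiPE native addresses, reduced to its two-phase shape. All names below are invented for the sketch; the real code additionally manages reference counts and issues a write memory barrier between parking the address and redirecting callers:

    #include <stddef.h>

    typedef struct {
        void *address;             /* what callers jump through */
        void *pend_purge_address;  /* parked here while a purge is pending */
    } FunEntrySketch;

    static void *unloaded_stub;    /* traps with an "illegal arity" error */

    static void purge_prepare(FunEntrySketch *fe)
    {
        fe->pend_purge_address = fe->address;
        /* (ERTS_THR_WRITE_MEMORY_BARRIER here in the real code) */
        fe->address = unloaded_stub;           /* redirect callers */
    }

    static void purge_abort(FunEntrySketch *fe)
    {
        fe->address = fe->pend_purge_address;  /* module survived: restore */
        fe->pend_purge_address = NULL;
    }

    static void purge_complete(FunEntrySketch *fe)
    {
        fe->pend_purge_address = NULL;         /* code is gone for good */
    }
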
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 73c3e19c1c..fb2901d866 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
#ifndef __ERLFUNTABLE_H__
#define __ERLFUNTABLE_H__
-#include "erl_smp.h"
+#include "erl_threads.h"
/*
* Fun entry.
@@ -45,6 +45,9 @@ typedef struct erl_fun_entry {
erts_refc_t refc; /* Reference count: One for code + one for each
fun object in each process. */
BeamInstr *pend_purge_address; /* address stored during a pending purge */
+#ifdef HIPE
+ UWord* pend_purge_native_address;
+#endif
} ErlFunEntry;
/*
@@ -57,9 +60,6 @@ typedef struct erl_fun_thing {
Eterm thing_word; /* Subtag FUN_SUBTAG. */
ErlFunEntry* fe; /* Pointer to fun entry. */
struct erl_off_heap_header* next;
-#ifdef HIPE
- UWord* native_address; /* Native code for the fun. */
-#endif
Uint arity; /* The arity of the fun. */
Uint num_free; /* Number of free variables (in env). */
/* -- The following may be compound Erlang terms ---------------------- */
@@ -71,7 +71,7 @@ typedef struct erl_fun_thing {
#define ERL_FUN_SIZE ((sizeof(ErlFunThing)/sizeof(Eterm))-1)
void erts_init_fun_table(void);
-void erts_fun_info(int, void *);
+void erts_fun_info(fmtfn_t, void *);
int erts_fun_table_sz(void);
ErlFunEntry* erts_put_fun_entry(Eterm mod, int uniq, int index);
@@ -86,6 +86,6 @@ void erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end);
void erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no);
void erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no);
void erts_fun_purge_complete(ErlFunEntry **funs, Uint no);
-void erts_dump_fun_entries(int, void *);
+void erts_dump_fun_entries(fmtfn_t, void *);
#endif
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 6f641a1ea7..d5dfb096b1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,13 +35,15 @@
#include "error.h"
#include "big.h"
#include "erl_gc.h"
-#if HIPE
+#ifdef HIPE
#include "hipe_stack.h"
#include "hipe_mode_switch.h"
#endif
#include "dtrace-wrapper.h"
#include "erl_bif_unique.h"
#include "dist.h"
+#include "erl_nfunc_sched.h"
+#include "erl_proc_sig_queue.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -59,6 +61,10 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
+#if defined(DEBUG) && 0
+# define HARDDEBUG 1
+#endif
+
/*
* Returns number of elements in an array.
*/
@@ -110,18 +116,21 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static void remove_message_buffers(Process* p);
static Eterm *full_sweep_heaps(Process *p,
+ ErlHeapFragment *live_hf_end,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
char *oh, Uint oh_size,
Eterm *objv, int nobj);
static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls);
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage);
static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl);
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl);
static void do_minor(Process *p, ErlHeapFragment *live_hf_end,
char *mature, Uint mature_size,
Uint new_sz, Eterm* objv, int nobj);
@@ -135,7 +144,7 @@ static Eterm* sweep_literal_area(Eterm* n_hp, Eterm* n_htop,
static Eterm* sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
char* src, Uint src_size);
static Eterm* collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
- Eterm* heap, Eterm* htop, Eterm* objv, int nobj);
+ Eterm* htop);
static int adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
@@ -146,14 +155,13 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
Eterm* objv, int nobj);
static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size);
-static void move_msgq_to_heap(Process *p);
static int reached_max_heap_size(Process *p, Uint total_heap_size,
Uint extra_heap_size, Uint extra_old_heap_size);
static void init_gc_info(ErtsGCInfo *gcip);
static Uint64 next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz);
#ifdef HARDDEBUG
-static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
#endif
@@ -171,11 +179,31 @@ Uint erts_test_long_gc_sleep; /* Only used for testing... */
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsGCInfoReq;
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+
+static void move_msgs_to_heap(Process *c_p)
+{
+ Uint64 pre_oh, post_oh;
+
+ pre_oh = c_p->off_heap.overhead;
+ erts_proc_sig_move_msgs_to_heap(c_p);
+ post_oh = c_p->off_heap.overhead;
+
+ if (pre_oh != post_oh) {
+ /* Got new binaries; update bin vheap size... */
+ c_p->bin_vheap_sz = next_vheap_size(c_p, post_oh,
+ c_p->bin_vheap_sz);
+ }
+}
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -194,6 +222,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
ErtsGCInfoReq,
5,
ERTS_ALC_T_GC_INFO_REQ)
+
/*
* Initialize GC global data.
*/
@@ -259,6 +288,10 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+ erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ init_gc_info(&dirty_gc.info);
+
init_gcireq_alloc();
}
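
The dirty_gc global initialized above collects statistics for dirty schedulers, which have no per-scheduler gc_info slot of their own; a reduced model with pthreads (all names invented):

    #include <pthread.h>

    typedef struct { unsigned long garbage_cols, reclaimed; } GcInfoSketch;

    static struct {
        pthread_mutex_t mtx;
        GcInfoSketch info;
    } dirty_gc_sketch = { PTHREAD_MUTEX_INITIALIZER, { 0, 0 } };

    static void dirty_gc_count(unsigned long reclaimed_now)
    {
        pthread_mutex_lock(&dirty_gc_sketch.mtx);
        dirty_gc_sketch.info.garbage_cols++;
        dirty_gc_sketch.info.reclaimed += reclaimed_now;
        pthread_mutex_unlock(&dirty_gc_sketch.mtx);
    }
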
@@ -321,7 +354,7 @@ erts_heap_sizes(Process* p)
for (i = num_heap_sizes-1; i >= 0; i--) {
n += 2;
- if (!MY_IS_SSMALL(heap_sizes[i])) {
+ if (!IS_SSMALL(heap_sizes[i])) {
big += BIG_UINT_HEAP_SIZE;
}
}
@@ -336,7 +369,7 @@ erts_heap_sizes(Process* p)
Eterm num;
Sint sz = heap_sizes[i];
- if (MY_IS_SSMALL(sz)) {
+ if (IS_SSMALL(sz)) {
num = make_small(sz);
} else {
num = uint_to_big(sz, bigp);
@@ -380,10 +413,16 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
{
int cost;
- if (p->flags & F_HIBERNATE_SCHED) {
+ if (p->flags & (F_HIBERNATE_SCHED|F_HIPE_RECV_LOCKED)) {
/*
* We just hibernated. We do *not* want to mess
* up the hibernation by an ordinary GC...
+ *
+ * OR
+ *
+ * We left a receive in HiPE with the message
+ * queue lock held, and we do not want to
+ * do a GC while that lock is held...
*/
return result;
}
@@ -395,20 +434,20 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
if (is_non_value(result)) {
if (p->freason == TRAP) {
- #if HIPE
+#ifdef HIPE
if (regs == NULL) {
regs = erts_proc_sched_data(p)->x_reg_array;
}
- #endif
- cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls);
+#endif
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls, 0);
} else {
- cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls, 0);
}
} else {
Eterm val[1];
val[0] = result;
- cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls, 0);
result = val[0];
}
BUMP_REDS(p, cost);
@@ -420,7 +459,7 @@ Eterm
erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
{
return erts_gc_after_bif_call_lhf(p, ERTS_INVALID_HFRAG_PTR,
- result, regs, arity);
+ result, regs, arity);
}
static ERTS_INLINE void reset_active_writer(Process *p)
@@ -460,9 +499,13 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
p->live_hf_end = live_hf_end;
}
- if (need == 0)
+ if (need == 0) {
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)));
+ goto force_reschedule;
+ }
return 1;
-
+ }
/*
* Satisfy need in a heap fragment...
*/
@@ -477,24 +520,26 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
hfrag = new_message_buffer(hsz);
- hfrag->next = p->mbuf;
- p->mbuf = hfrag;
- p->mbuf_sz += hsz;
p->heap = p->htop = &hfrag->mem[0];
p->hend = hend = &hfrag->mem[hsz];
p->stop = stop = hend - ssz;
sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
if (p->abandoned_heap) {
- /* Active heap already in a fragment; adjust it... */
- ErlHeapFragment *hfrag = ((ErlHeapFragment *)
- (((char *) orig_heap)
- - offsetof(ErlHeapFragment, mem)));
- Uint unused = orig_hend - orig_htop;
- ASSERT(hfrag->used_size == hfrag->alloc_size);
- ASSERT(hfrag->used_size >= unused);
- hfrag->used_size -= unused;
- p->mbuf_sz -= unused;
+ /*
+ * Active heap already in a fragment; adjust it and
+ * save it into mbuf list...
+ */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint used = orig_htop - orig_heap;
+ hfrag->used_size = used;
+ p->mbuf_sz += used;
+ ASSERT(hfrag->used_size <= hfrag->alloc_size);
+ ASSERT(!hfrag->off_heap.first && !hfrag->off_heap.overhead);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
}
else {
/* Do not leave a hole in the abandoned heap... */
@@ -513,6 +558,8 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
p->heap_hfrag = hfrag;
#endif
+force_reschedule:
+
/* Make sure that we do a proper GC as soon as possible... */
p->flags |= F_FORCE_GC;
reds_left = ERTS_REDS_LEFT(p, fcalls);
@@ -540,33 +587,36 @@ young_gen_usage(Process *p)
hsz = p->mbuf_sz;
if (p->flags & F_ON_HEAP_MSGQ) {
- ErtsMessage *mp;
- for (mp = p->msg.first; mp; mp = mp->next) {
- /*
- * We leave not yet decoded distribution messages
- * as they are in the queue since it is not
- * possible to determine a maximum size until
- * actual decoding. However, we use their estimated
- * size when calculating need, and by this making
- * it more likely that they will fit on the heap
- * when actually decoded.
- */
- if (mp->data.attached)
- hsz += erts_msg_attached_data_size(mp);
- }
- }
-
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ /*
+ * We leave not yet decoded distribution messages
+ * as they are in the queue since it is not
+ * possible to determine a maximum size until
+ * actual decoding. However, we use their estimated
+ * size when calculating the need, which makes
+ * it more likely that they will fit on the heap
+ * when actually decoded.
+ *
+ * We do, however, ignore off-heap messages...
+ */
+ if (ERTS_SIG_IS_MSG(mp)
+ && mp->data.attached
+ && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ hsz += erts_msg_attached_data_size(mp);
+ }
+ });
+ }
+
+ hsz += p->htop - p->heap;
aheap = p->abandoned_heap;
- if (!aheap)
- hsz += p->htop - p->heap;
- else {
+ if (aheap) {
/* used in orig heap */
if (p->flags & F_ABANDONED_HEAP_USE)
hsz += aheap[p->heap_sz-1];
else
hsz += p->heap_sz;
- /* Remove unused part in latest fragment */
- hsz -= p->hend - p->htop;
}
return hsz;
}
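
A toy model of the estimate computed by young_gen_usage() above (all types invented; the abandoned-heap case is omitted): the young generation is the used part of the current heap plus all heap fragments, plus the estimated size of attached, not-yet-decoded messages so that they are likely to fit once decoded:

    #include <stddef.h>

    typedef struct Msg { struct Msg *next; size_t attached_est; } Msg;

    typedef struct {
        size_t htop_used;   /* p->htop - p->heap, in words */
        size_t mbuf_sz;     /* total heap fragment usage, in words */
        Msg *queue;
    } ProcSketch;

    static size_t young_gen_usage_model(const ProcSketch *p)
    {
        size_t hsz = p->mbuf_sz + p->htop_used;
        const Msg *mp;

        for (mp = p->queue; mp; mp = mp->next)
            hsz += mp->attached_est;   /* 0 once a message is decoded */
        return hsz;
    }
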
@@ -587,6 +637,30 @@ young_gen_usage(Process *p)
} \
} while (0)
+
+static ERTS_INLINE void
+check_for_possibly_long_gc(Process *p, Uint ygen_usage)
+{
+ int major;
+ Uint sz;
+
+ major = (p->flags & F_NEED_FULLSWEEP) || GEN_GCS(p) >= MAX_GEN_GCS(p);
+
+ sz = ygen_usage;
+ sz += p->hend - p->stop;
+ if (p->flags & F_ON_HEAP_MSGQ)
+ sz += erts_proc_sig_privqs_len(p);
+ if (major)
+ sz += p->old_htop - p->old_heap;
+
+ if (sz >= ERTS_POTENTIALLY_LONG_GC_HSIZE) {
+ ASSERT(!(p->flags & (F_DISABLE_GC|F_DELAY_GC)));
+ p->flags |= major ? F_DIRTY_MAJOR_GC : F_DIRTY_MINOR_GC;
+ erts_schedule_dirty_sys_execution(p);
+ }
+}
+
+
/*
* Garbage collect a process.
*
@@ -597,39 +671,49 @@ young_gen_usage(Process *p)
*/
static int
garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, int fcalls)
+ int need, Eterm* objv, int nobj, int fcalls,
+ Uint max_young_gen_usage)
{
Uint reclaimed_now = 0;
+ Uint ygen_usage;
Eterm gc_trace_end_tag;
int reds;
- ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
- ErtsSchedulerData *esdp;
+ ErtsMonotonicTime start_time;
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
erts_aint32_t state;
- ERTS_MSACC_PUSH_STATE_M();
+ ERTS_MSACC_PUSH_STATE();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
+ ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
- ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
- >= erts_proc_sched_data(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds);
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
- if (p->flags & (F_DISABLE_GC|F_DELAY_GC) || state & ERTS_PSFLG_EXITING)
+ if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
+ delay_gc_before_start:
return delay_garbage_collection(p, live_hf_end, need, fcalls);
+ }
+
+ ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto delay_gc_before_start;
+ }
if (p->abandoned_heap)
live_hf_end = ERTS_INVALID_HFRAG_PTR;
else if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
live_hf_end = p->live_hf_end;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_GC);
- esdp = erts_get_scheduler_data();
-
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0)
start_time = erts_get_monotonic_time(esdp);
@@ -655,26 +739,38 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
}
DTRACE2(gc_minor_start, pidbuf, need);
- reds = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = minor_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
if (reds == -1) {
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
}
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, ygen_usage);
+ if (p->flags & F_DIRTY_MAJOR_GC)
+ goto delay_gc_after_start;
+ }
goto do_major_collection;
}
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~F_DIRTY_MINOR_GC;
gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC_FULL);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
DTRACE2(gc_major_start, pidbuf, need);
- reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
+ reds = major_collection(p, live_hf_end, need, objv, nobj,
+ ygen_usage, &reclaimed_now);
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ p->flags &= ~(F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
gc_trace_end_tag = am_gc_major_end;
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
+ ERTS_MSACC_SET_STATE_CACHED_X(ERTS_MSACC_STATE_GC);
}
reset_active_writer(p);
@@ -693,25 +789,22 @@ do_major_collection:
long_gc/large_gc triggers when this happens as process was
killed before a GC could be done. */
if (reds == -2) {
- ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
int res;
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- erts_send_exit_signal(p, p->common.id, p, &locks,
- am_kill, NIL, NULL, 0);
- erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_set_self_exiting(p, am_killed);
+ delay_gc_after_start:
/* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
we have to remove it after the signal is sent */
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
/* We have to make sure that we have space for need on the heap */
res = delay_garbage_collection(p, live_hf_end, need, fcalls);
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
return res;
}
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE);
@@ -735,13 +828,22 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
- FLAGS(p) &= ~F_FORCE_GC;
+ FLAGS(p) &= ~(F_FORCE_GC|F_HIBERNATED);
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
- ERTS_MSACC_POP_STATE_M();
+ ERTS_MSACC_POP_STATE();
#ifdef CHECK_FOR_HOLES
/*
@@ -765,6 +867,7 @@ do_major_collection:
ASSERT(!p->mbuf);
ASSERT(!ERTS_IS_GC_DESIRED(p));
+ ASSERT(need <= HEAP_LIMIT(p) - HEAP_TOP(p));
return reds;
}
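
The shape of the check_for_possibly_long_gc() heuristic used by garbage_collect() above, with the dirty-scheduler side effect stubbed out; the threshold constant matches the erl_gc.h hunk at the end of this diff, the function name and parameters are invented:

    #include <stddef.h>

    #define POTENTIALLY_LONG_GC_HSIZE (128*1024) /* words, as in erl_gc.h */

    static int should_run_gc_dirty(size_t ygen_usage, size_t stack_words,
                                   size_t msgq_len, size_t old_gen_words,
                                   int major)
    {
        size_t sz = ygen_usage + stack_words + msgq_len;
        if (major)
            sz += old_gen_words;   /* full sweeps also walk the old heap */
        return sz >= POTENTIALLY_LONG_GC_HSIZE;
    }
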
@@ -772,7 +875,7 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
@@ -783,18 +886,19 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
}
+
/*
* Place all living data on the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
*/
-void
-erts_garbage_collect_hibernate(Process* p)
+static int
+garbage_collect_hibernate(Process* p, int check_long_gc)
{
Uint heap_size;
Eterm* heap;
@@ -808,10 +912,22 @@ erts_garbage_collect_hibernate(Process* p)
if (p->flags & F_DISABLE_GC)
ERTS_INTERNAL_ERROR("GC disabled");
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
+ else if (check_long_gc) {
+ Uint flags = p->flags;
+ p->flags |= F_NEED_FULLSWEEP;
+ check_for_possibly_long_gc(p, (p->htop - p->heap) + p->mbuf_sz);
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ p->flags = flags|F_DIRTY_GC_HIBERNATE;
+ return 1;
+ }
+ p->flags = flags;
+ }
/*
* Preliminaries.
*/
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
ErtsGcQuickSanityCheck(p);
ASSERT(p->stop == p->hend); /* Stack must be empty. */
@@ -819,7 +935,6 @@ erts_garbage_collect_hibernate(Process* p)
* Do it.
*/
-
heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
@@ -827,6 +942,7 @@ erts_garbage_collect_hibernate(Process* p)
htop = heap;
htop = full_sweep_heaps(p,
+ ERTS_INVALID_HFRAG_PTR,
1,
heap,
htop,
@@ -835,11 +951,11 @@ erts_garbage_collect_hibernate(Process* p)
p->arg_reg,
p->arity);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : p->heap),
- p->heap_sz * sizeof(Eterm));
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, heap, htop);
+#endif
+
+ erts_deallocate_young_generation(p);
p->heap = heap;
p->high_water = htop;
@@ -874,8 +990,6 @@ erts_garbage_collect_hibernate(Process* p)
sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
- remove_message_buffers(p);
-
p->stop = p->hend = heap + heap_size;
offs = heap - p->heap;
@@ -903,12 +1017,20 @@ erts_garbage_collect_hibernate(Process* p)
ErtsGcQuickSanityCheck(p);
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ p->flags |= F_HIBERNATED;
+
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
reds = gc_cost(actual_size, actual_size);
- BUMP_REDS(p, reds);
+ return reds;
}
+void
+erts_garbage_collect_hibernate(Process* p)
+{
+ int reds = garbage_collect_hibernate(p, 1);
+ BUMP_REDS(p, reds);
+}
/*
* HiPE native code stack scanning procedures:
@@ -962,10 +1084,11 @@ static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
#endif /* HIPE */
-void
+int
erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint byte_lit_size,
- struct erl_off_heap_header* oh)
+ struct erl_off_heap_header* oh,
+ int fcalls)
{
Uint lit_size = byte_lit_size / sizeof(Eterm);
Uint old_heap_size;
@@ -977,20 +1100,70 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint area_size;
Eterm* old_htop;
Uint n;
+ Uint ygen_usage = 0;
struct erl_off_heap_header** prev = NULL;
+ Sint64 reds;
+ int hibernated = !!(p->flags & F_HIBERNATED);
+
+ if (p->flags & (F_DISABLE_GC|F_DELAY_GC))
+ ERTS_INTERNAL_ERROR("GC disabled");
+
+ /*
+ * First an ordinary major collection...
+ */
+
+ p->flags |= F_NEED_FULLSWEEP;
+
+ if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
+ p->flags &= ~F_DIRTY_CLA;
+ else {
+ Uint size = byte_lit_size/sizeof(Uint);
+ ygen_usage = young_gen_usage(p);
+ if (hibernated)
+ size = size*2 + 3*ygen_usage;
+ else
+ size = size + 2*ygen_usage;
+ check_for_possibly_long_gc(p, size);
+ if (p->flags & F_DIRTY_MAJOR_GC) {
+ p->flags |= F_DIRTY_CLA;
+ return 10;
+ }
+ }
+
+ reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
+ p->arg_reg, p->arity, fcalls,
+ ygen_usage);
+ if (ERTS_PROC_IS_EXITING(p)) {
+ return 0;
+ }
+
+ ASSERT(!(p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)));
+
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint new_heap_size;
+ Uint old_heap_size;
+ Uint total_heap_size;
+
+ new_heap_size = HEAP_END(p) - HEAP_START(p);
+ old_heap_size = erts_next_heap_size(lit_size, 0);
+ total_heap_size = new_heap_size + old_heap_size;
+ if (MAX_HEAP_SIZE_GET(p) < total_heap_size &&
+ reached_max_heap_size(p, total_heap_size,
+ new_heap_size, old_heap_size)) {
+ erts_set_self_exiting(p, am_killed);
+ return 0;
+ }
+ }
- if (p->flags & F_DISABLE_GC)
- return;
/*
* Set GC state.
*/
- erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
/*
- * We assume that the caller has already done a major collection
- * (which has discarded the old heap), so that we don't have to cope
- * with pointer to literals on the old heap. We will now allocate
- * an old heap to contain the literals.
+ * Just did a major collection (which has discarded the old heap),
+ * so that we don't have to cope with pointer to literals on the
+ * old heap. We will now allocate an old heap to contain the literals.
*/
ASSERT(p->old_heap == 0); /* Must NOT have an old heap yet. */
@@ -1033,7 +1206,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
roots++;
- while (g_sz--) {
+ for ( ; g_sz--; g_ptr++) {
Eterm gval = *g_ptr;
switch (primary_tag(gval)) {
@@ -1042,26 +1215,21 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
val = *ptr;
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
- *g_ptr++ = val;
+ *g_ptr = val;
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
- } else {
- g_ptr++;
+ move_boxed(ptr,val,&old_htop,g_ptr);
}
break;
case TAG_PRIMARY_LIST:
ptr = list_val(gval);
val = *ptr;
if (IS_MOVED_CONS(val)) { /* Moved */
- *g_ptr++ = ptr[1];
+ *g_ptr = ptr[1];
} else if (ErtsInArea(ptr, area, area_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
- } else {
- g_ptr++;
- }
+ move_cons(ptr,val,&old_htop,g_ptr);
+ }
break;
default:
- g_ptr++;
break;
}
}
@@ -1082,8 +1250,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
p->old_htop = old_htop;
/*
- * Prepare to sweep binaries. Since all MSOs on the new heap
- * must be come before MSOs on the old heap, find the end of
+ * Prepare to sweep off-heap objects. Since all MSOs on the new
+ * heap must come before MSOs on the old heap, find the end of
* current MSO list and use that as a starting point.
*/
@@ -1095,25 +1263,50 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
}
/*
- * Sweep through all binaries in the temporary literal area.
+ * Sweep through all off-heap objects in the temporary literal area.
*/
while (oh) {
if (IS_MOVED_BOXED(oh->thing_word)) {
- Binary* bptr;
struct erl_off_heap_header* ptr;
- ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word);
- ASSERT(thing_subtag(ptr->thing_word) == REFC_BINARY_SUBTAG);
- bptr = ((ProcBin*)ptr)->val;
-
- /*
- * This binary has been copied to the heap.
+ /*
+ * This off-heap object has been copied to the heap.
* We must increment its reference count and
* link it into the MSO list for the process.
*/
- erts_refc_inc(&bptr->refc, 1);
+ ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word);
+ switch (thing_subtag(ptr->thing_word)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ Binary* bptr = ((ProcBin*)ptr)->val;
+ erts_refc_inc(&bptr->intern.refc, 1);
+ break;
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
+ erts_refc_inc(&fe->refc, 1);
+ break;
+ }
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(ptr));
+ bptr = ((ErtsMRefThing *) ptr)->mb;
+ erts_refc_inc(&bptr->intern.refc, 1);
+ break;
+ }
+ default:
+ {
+ ExternalThing *etp;
+ ASSERT(is_external_header(ptr->thing_word));
+ etp = (ExternalThing *) ptr;
+ erts_refc_inc(&etp->node->refc, 1);
+ break;
+ }
+ }
*prev = ptr;
prev = &ptr->next;
}
@@ -1132,16 +1325,28 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
/*
* Restore status.
*/
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0);
+
+ if (hibernated) {
+ /* Restore the process into hibernated state... */
+ reds += garbage_collect_hibernate(p, 0);
+ }
+
+ if (reds > INT_MAX)
+ return INT_MAX;
+ return (int) reds;
}
static int
minor_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Eterm *mature = p->abandoned_heap ? p->abandoned_heap : p->heap;
Uint mature_size = p->high_water - mature;
- Uint size_before = young_gen_usage(p);
+ Uint size_before = ygen_usage;
/*
* Check if we have gone past the max heap size limit
@@ -1213,7 +1418,7 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
new_sz, objv, nobj);
if (p->flags & F_ON_HEAP_MSGQ)
- move_msgq_to_heap(p);
+ move_msgs_to_heap(p);
new_mature = p->old_htop - prev_old_htop;
@@ -1314,25 +1519,29 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
+ n = setup_rootset(p, objv, nobj, &rootset);
+ roots = rootset.roots;
+
+ /*
+ * All allocations done. Now start defiling the heap with move markers.
+ * A crash dump due to allocation failure above will see a healthy heap.
+ */
+
if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
/*
* Move heap frags that we know are completely live
* directly into the new young heap generation.
*/
- n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
- objv, nobj);
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_htop);
}
- n = setup_rootset(p, objv, nobj, &rootset);
- roots = rootset.roots;
-
GENSWEEP_NSTACK(p, old_htop, n_htop);
while (n--) {
Eterm* g_ptr = roots->v;
Uint g_sz = roots->sz;
roots++;
- while (g_sz--) {
+ for ( ; g_sz--; g_ptr++) {
gval = *g_ptr;
switch (primary_tag(gval)) {
@@ -1342,14 +1551,12 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
val = *ptr;
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
- *g_ptr++ = val;
+ *g_ptr = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ move_boxed(ptr,val,&old_htop,g_ptr);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
- } else {
- g_ptr++;
- }
+ move_boxed(ptr,val,&n_htop,g_ptr);
+ }
break;
}
@@ -1357,19 +1564,15 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ptr = list_val(gval);
val = *ptr;
if (IS_MOVED_CONS(val)) { /* Moved */
- *g_ptr++ = ptr[1];
+ *g_ptr = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ move_cons(ptr,val,&old_htop,g_ptr);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
- } else {
- g_ptr++;
- }
+ move_cons(ptr,val,&n_htop,g_ptr);
+ }
break;
}
-
default:
- g_ptr++;
break;
}
}
@@ -1403,9 +1606,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,n_hp++);
+ move_boxed(ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1417,9 +1620,9 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_CONS(ptr,val,old_htop,n_hp++);
+ move_cons(ptr,val,&old_htop,n_hp++);
} else if (ErtsInYoungGen(gval, ptr, oh, oh_size)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1439,10 +1642,10 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
*origptr = val;
mb->base = binary_bytes(val);
} else if (ErtsInArea(ptr, mature, mature_size)) {
- MOVE_BOXED(ptr,val,old_htop,origptr);
+ move_boxed(ptr,val,&old_htop,origptr);
mb->base = binary_bytes(mb->orig);
} else if (ErtsInYoungGen(*origptr, ptr, oh, oh_size)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(ptr,val,&n_htop,origptr);
mb->base = binary_bytes(mb->orig);
}
}
@@ -1494,22 +1697,16 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- HEAP_SIZE(p) * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
HEAP_END(p) = n_heap + new_sz;
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
}
/*
@@ -1518,7 +1715,8 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
static int
major_collection(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj, Uint *recl)
+ int need, Eterm* objv, int nobj,
+ Uint ygen_usage, Uint *recl)
{
Uint size_before, size_after, stack_size;
Eterm* n_heap;
@@ -1536,7 +1734,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
* to receive all live data.
*/
- size_before = young_gen_usage(p);
+ size_before = ygen_usage;
size_before += p->old_htop - p->old_heap;
stack_size = p->hend - p->stop;
@@ -1573,16 +1771,8 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
- /*
- * Move heap frags that we know are completely live
- * directly into the heap.
- */
- n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
- objv, nobj);
- }
-
- n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, oh, oh_size, objv, nobj);
+ n_htop = full_sweep_heaps(p, live_hf_end, 0, n_heap, n_htop, oh, oh_size,
+ objv, nobj);
/* Move the stack to the end of the heap */
stk_sz = HEAP_END(p) - p->stop;
@@ -1598,13 +1788,12 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- p->heap_sz * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1613,13 +1802,8 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
-
if (p->flags & F_ON_HEAP_MSGQ)
- move_msgq_to_heap(p);
+ move_msgs_to_heap(p);
ErtsGcQuickSanityCheck(p);
@@ -1635,6 +1819,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
static Eterm *
full_sweep_heaps(Process *p,
+ ErlHeapFragment *live_hf_end,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
char *oh, Uint oh_size,
@@ -1651,6 +1836,19 @@ full_sweep_heaps(Process *p,
n = setup_rootset(p, objv, nobj, &rootset);
+ /*
+ * All allocations done. Now start defiling the heap with move markers.
+ * A crash dump due to allocation failure above will see a healthy heap.
+ */
+
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the heap.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_htop);
+ }
+
#ifdef HIPE
if (hibernate)
hipe_empty_nstack(p);
@@ -1664,11 +1862,11 @@ full_sweep_heaps(Process *p,
Eterm g_sz = roots->sz;
roots++;
- while (g_sz--) {
+ for ( ; g_sz--; g_ptr++) {
Eterm* ptr;
Eterm val;
Eterm gval = *g_ptr;
-
+
switch (primary_tag(gval)) {
case TAG_PRIMARY_BOXED: {
@@ -1676,32 +1874,26 @@ full_sweep_heaps(Process *p,
val = *ptr;
if (IS_MOVED_BOXED(val)) {
ASSERT(is_boxed(val));
- *g_ptr++ = val;
+ *g_ptr = val;
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,g_ptr++);
- } else {
- g_ptr++;
+ move_boxed(ptr,val,&n_htop,g_ptr);
}
- continue;
+ break;
}
case TAG_PRIMARY_LIST: {
ptr = list_val(gval);
val = *ptr;
if (IS_MOVED_CONS(val)) {
- *g_ptr++ = ptr[1];
+ *g_ptr = ptr[1];
} else if (!erts_is_literal(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,g_ptr++);
- } else {
- g_ptr++;
+ move_cons(ptr,val,&n_htop,g_ptr);
}
- continue;
+ break;
}
- default: {
- g_ptr++;
- continue;
- }
+ default:
+ break;
}
}
}
@@ -1770,22 +1962,56 @@ adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
return adjusted;
}
-/*
- * Remove all message buffers.
- */
-static void
-remove_message_buffers(Process* p)
+void
+erts_deallocate_young_generation(Process *c_p)
{
- if (MBUF(p) != NULL) {
- free_message_buffer(MBUF(p));
- MBUF(p) = NULL;
+ Eterm *orig_heap;
+
+ if (!c_p->abandoned_heap) {
+ orig_heap = c_p->heap;
+ ASSERT(!(c_p->flags & F_ABANDONED_HEAP_USE));
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ orig_heap = c_p->abandoned_heap;
+ c_p->abandoned_heap = NULL;
+ c_p->flags &= ~F_ABANDONED_HEAP_USE;
+
+ /*
+ * The temporary heap is located in a heap fragment
+ * referred to only by 'c_p->heap'. Add it to the
+ * 'c_p->mbuf' list and deallocate it like any
+ * other heap fragment...
+ */
+ hfrag = ((ErlHeapFragment *)
+ (((char *) c_p->heap)
+ - offsetof(ErlHeapFragment, mem)));
+
+ ASSERT(!hfrag->off_heap.first);
+ ASSERT(!hfrag->off_heap.overhead);
+ ASSERT(!hfrag->next);
+ ASSERT(c_p->htop - c_p->heap <= hfrag->alloc_size);
+
+ hfrag->next = c_p->mbuf;
+ c_p->mbuf = hfrag;
+ }
+
+ if (c_p->mbuf) {
+ free_message_buffer(c_p->mbuf);
+ c_p->mbuf = NULL;
}
- if (p->msg_frag) {
- erts_cleanup_messages(p->msg_frag);
- p->msg_frag = NULL;
+ if (c_p->msg_frag) {
+ erts_cleanup_messages(c_p->msg_frag);
+ c_p->msg_frag = NULL;
}
- MBUF_SIZE(p) = 0;
+ c_p->mbuf_sz = 0;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ orig_heap,
+ c_p->heap_sz * sizeof(Eterm));
}
+
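
erts_deallocate_young_generation() above recovers a heap fragment header from the bare 'c_p->heap' pointer with an offsetof() subtraction; the same container-of idiom in generic form (struct frag is invented for the sketch):

    #include <stddef.h>

    #define CONTAINER_OF(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    struct frag {
        struct frag *next;
        size_t alloc_size;
        long mem[1];              /* payload; callers hold &frag->mem[0] */
    };

    /* Given only a pointer into 'mem', get the enclosing fragment back. */
    static struct frag *frag_of(long *heap)
    {
        return CONTAINER_OF(heap, struct frag, mem);
    }
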
#ifdef HARDDEBUG
/*
@@ -1797,19 +2023,15 @@ remove_message_buffers(Process* p)
*/
static void
-disallow_heap_frag_ref_in_heap(Process* p)
+disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop)
{
Eterm* hp;
- Eterm* htop;
- Eterm* heap;
Uint heap_size;
if (p->mbuf == 0) {
return;
}
- htop = p->htop;
- heap = p->heap;
heap_size = (htop - heap)*sizeof(Eterm);
hp = heap;
@@ -1950,7 +2172,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
ASSERT(is_boxed(val));
*n_hp++ = val;
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ move_boxed(ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1962,7 +2184,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (IS_MOVED_CONS(val)) {
*n_hp++ = ptr[1];
} else if (ERTS_IS_IN_SWEEP_AREA(gval, ptr)) {
- MOVE_CONS(ptr,val,n_htop,n_hp++);
+ move_cons(ptr,val,&n_htop,n_hp++);
} else {
n_hp++;
}
@@ -1975,7 +2197,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
if (header_is_bin_matchstate(gval)) {
ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
ErlBinMatchBuffer *mb = &(ms->mb);
- Eterm* origptr;
+ Eterm* origptr;
origptr = &(mb->orig);
ptr = boxed_val(*origptr);
val = *ptr;
@@ -1983,7 +2205,7 @@ sweep(Eterm *n_hp, Eterm *n_htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ERTS_IS_IN_SWEEP_AREA(*origptr, ptr)) {
- MOVE_BOXED(ptr,val,n_htop,origptr);
+ move_boxed(ptr,val,&n_htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2046,7 +2268,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
ASSERT(is_boxed(val));
*heap_ptr++ = val;
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,heap_ptr++);
+ move_boxed(ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2058,7 +2280,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
if (IS_MOVED_CONS(val)) {
*heap_ptr++ = ptr[1];
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_CONS(ptr,val,htop,heap_ptr++);
+ move_cons(ptr,val,&htop,heap_ptr++);
} else {
heap_ptr++;
}
@@ -2079,7 +2301,7 @@ sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
*origptr = val;
mb->base = binary_bytes(*origptr);
} else if (ErtsInArea(ptr, src, src_size)) {
- MOVE_BOXED(ptr,val,htop,origptr);
+ move_boxed(ptr,val,&htop,origptr);
mb->base = binary_bytes(*origptr);
}
}
@@ -2112,11 +2334,11 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
ASSERT(val != ERTS_HOLE_MARKER);
if (is_header(val)) {
ASSERT(ptr + header_arity(val) < end);
- MOVE_BOXED(ptr, val, n_htop, &dummy_ref);
+ ptr = move_boxed(ptr, val, &n_htop, &dummy_ref);
}
else { /* must be a cons cell */
ASSERT(ptr+1 < end);
- MOVE_CONS(ptr, val, n_htop, &dummy_ref);
+ move_cons(ptr, val, &n_htop, &dummy_ref);
ptr += 2;
}
}
@@ -2129,9 +2351,7 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
*/
static Eterm*
-collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
- Eterm* n_hstart, Eterm* n_htop,
- Eterm* objv, int nobj)
+collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end, Eterm* n_htop)
{
ErlHeapFragment* qb;
char* frag_begin;
@@ -2155,9 +2375,9 @@ collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
return n_htop;
}
-static ERTS_INLINE void
-copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
- ErlHeapFragment *bp, Eterm *refs, int nrefs)
+void
+erts_copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
+ ErlHeapFragment *bp, Eterm *refs, int nrefs)
{
Uint sz;
int i;
@@ -2182,32 +2402,27 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hp++ = val;
break;
case TAG_PRIMARY_LIST:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,list_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_BOXED:
-#ifdef SHCOPY_SEND
if (erts_is_literal(val,boxed_val(val))) {
*hp++ = val;
} else {
*hp++ = offset_ptr(val, offs);
}
-#else
- *hp++ = offset_ptr(val, offs);
-#endif
break;
case TAG_PRIMARY_HEADER:
*hp++ = val;
switch (val & _HEADER_SUBTAG_MASK) {
case ARITYVAL_SUBTAG:
break;
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(fhp - 1))
+ goto the_default;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2217,6 +2432,7 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
cpy_sz = thing_arityval(val);
goto cpy_words;
default:
+ the_default:
cpy_sz = header_arity(val);
cpy_words:
@@ -2261,70 +2477,20 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
*hpp = hp;
for (i = 0; i < nrefs; i++) {
- if (is_not_immed(refs[i]))
+ if (is_not_immed(refs[i]) && !erts_is_literal(refs[i],boxed_val(refs[i])))
refs[i] = offset_ptr(refs[i], offs);
}
bp->off_heap.first = NULL;
}
-static void
-move_msgq_to_heap(Process *p)
-{
-
- ErtsMessage **mpp = &p->msg.first;
- Uint64 pre_oh = MSO(p).overhead;
-
- while (*mpp) {
- ErtsMessage *mp = *mpp;
-
- if (mp->data.attached) {
- ErlHeapFragment *bp;
-
- /*
- * We leave not yet decoded distribution messages
- * as they are in the queue since it is not
- * possible to determine a maximum size until
- * actual decoding...
- */
- if (is_value(ERL_MESSAGE_TERM(mp))) {
-
- bp = erts_message_to_heap_frag(mp);
-
- if (bp->next)
- erts_move_multi_frags(&p->htop, &p->off_heap, bp,
- mp->m, ERL_MESSAGE_REF_ARRAY_SZ, 0);
- else
- copy_one_frag(&p->htop, &p->off_heap, bp,
- mp->m, ERL_MESSAGE_REF_ARRAY_SZ);
-
- if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
- mp->data.heap_frag = NULL;
- free_message_buffer(bp);
- }
- else {
- ErtsMessage *new_mp = erts_alloc_message(0, NULL);
- sys_memcpy((void *) new_mp->m, (void *) mp->m,
- sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
- erts_msgq_replace_msg_ref(&p->msg, new_mp, mpp);
- mp->next = NULL;
- erts_cleanup_messages(mp);
- mp = new_mp;
- }
- }
- }
-
- mpp = &(*mpp)->next;
- }
-
- if (pre_oh != MSO(p).overhead) {
- /* Got new binaries; update vheap size... */
- BIN_VHEAP_SZ(p) = next_vheap_size(p, MSO(p).overhead, BIN_VHEAP_SZ(p));
- }
-}
-
static Uint
setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
{
+ /*
+ * NOTE!
+ * Remember to update offset_rootset() when changing
+ * this function.
+ */
Roots* roots;
Uint n;
@@ -2387,17 +2553,10 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
}
/*
- * If a NIF has saved arguments, they need to be added
+ * If a NIF or BIF has saved arguments, they need to be added
*/
- if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
- Eterm* argv;
- int argc;
- if (erts_setup_nif_gc(p, &argv, &argc)) {
- roots[n].v = argv;
- roots[n].sz = argc;
- n++;
- }
- }
+ if (erts_setup_nif_export_rootset(p, &roots[n].v, &roots[n].sz))
+ n++;
ASSERT(n <= rootset->size);
@@ -2408,15 +2567,17 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
break;
case F_OFF_HEAP_MSGQ_CHNG:
case 0: {
+ Sint len;
/*
* We do not have off heap message queue enabled, i.e. we
- * need to add message queue to rootset...
+ * need to add signal queues to rootset...
*/
- ErtsMessage *mp;
+
+ len = erts_proc_sig_privqs_len(p);
/* Ensure large enough rootset... */
- if (n + p->msg.len > rootset->size) {
- Uint new_size = n + p->msg.len;
+ if (n + len > rootset->size) {
+ Uint new_size = n + len;
ERTS_GC_ASSERT(roots == rootset->def);
roots = erts_alloc(ERTS_ALC_T_ROOTSET,
new_size*sizeof(Roots));
@@ -2424,18 +2585,19 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
rootset->size = new_size;
}
- for (mp = p->msg.first; mp; mp = mp->next) {
-
- if (!mp->data.attached) {
- /*
- * Message may refer data on heap;
- * add it to rootset...
- */
- roots[n].v = mp->m;
- roots[n].sz = ERL_MESSAGE_REF_ARRAY_SZ;
- n++;
- }
- }
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ if (ERTS_SIG_IS_INTERNAL_MSG(mp) && !mp->data.attached) {
+ /*
+ * Message may refer data on heap;
+ * add it to rootset...
+ */
+ roots[n].v = mp->m;
+ roots[n].sz = ERL_MESSAGE_REF_ARRAY_SZ;
+ n++;
+ }
+ });
break;
}
}
@@ -2652,6 +2814,16 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
*prevppp = &pbp->next;
}
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+/*
+ * ERTS_MAGIC_REF_THING_HEADER only defined when there
+ * is a size difference between magic and ordinary references...
+ */
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_MAGIC_REF_THING_HEADER
+#else
+# define ERTS_USED_MAGIC_REF_THING_HEADER__ ERTS_REF_THING_HEADER
+#endif
+
static void
sweep_off_heap(Process *p, int fullsweep)
@@ -2676,7 +2848,7 @@ sweep_off_heap(Process *p, int fullsweep)
prev = &MSO(p).first;
ptr = MSO(p).first;
- /* Firts part of the list will reside on the (old) new-heap.
+ /* First part of the list will reside on the (old) new-heap.
* Keep if moved, otherwise deref.
*/
while (ptr) {
@@ -2684,7 +2856,8 @@ sweep_off_heap(Process *p, int fullsweep)
ASSERT(!ErtsInArea(ptr, oheap, oheap_sz));
*prev = ptr = (struct erl_off_heap_header*) boxed_val(ptr->thing_word);
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN: {
int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
if (to_new_heap) {
@@ -2693,21 +2866,34 @@ sweep_off_heap(Process *p, int fullsweep)
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
}
link_live_proc_bin(&shrink, &prev, &ptr, to_new_heap);
- }
- else {
+ break;
+ }
+ case ERTS_USED_MAGIC_REF_THING_HEADER__: {
+ Uint size;
+ int to_new_heap = !ErtsInArea(ptr, oheap, oheap_sz);
+ ASSERT(is_magic_ref_thing(ptr));
+ ASSERT(to_new_heap == !seen_mature || (!to_new_heap && (seen_mature=1)));
+ size = (Uint) ((ErtsMRefThing *) ptr)->mb->orig_size;
+ if (to_new_heap)
+ bin_vheap += size / sizeof(Eterm);
+ else
+ BIN_OLD_VHEAP(p) += size / sizeof(Eterm); /* for binary gc (words)*/
+ /* fall through... */
+ }
+ default:
prev = &ptr->next;
ptr = ptr->next;
}
}
- else if (!ErtsInArea(ptr, oheap, oheap_sz)) {
+ else if (ErtsInArea(ptr, oheap, oheap_sz))
+ break; /* and let old-heap loop continue */
+ else {
/* garbage */
switch (thing_subtag(ptr->thing_word)) {
case REFC_BINARY_SUBTAG:
{
Binary* bptr = ((ProcBin*)ptr)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
break;
}
case FUN_SUBTAG:
@@ -2718,13 +2904,20 @@ sweep_off_heap(Process *p, int fullsweep)
}
break;
}
+ case REF_SUBTAG:
+ {
+ ErtsMagicBinary *bptr;
+ ASSERT(is_magic_ref_thing(ptr));
+ bptr = ((ErtsMRefThing *) ptr)->mb;
+ erts_bin_release((Binary *) bptr);
+ break;
+ }
default:
ASSERT(is_external_header(ptr->thing_word));
erts_deref_node_entry(((ExternalThing*)ptr)->node);
}
*prev = ptr = ptr->next;
}
- else break; /* and let old-heap loop continue */
}
/* The rest of the list resides on old-heap, and we just did a
@@ -2733,15 +2926,24 @@ sweep_off_heap(Process *p, int fullsweep)
while (ptr) {
ASSERT(ErtsInArea(ptr, oheap, oheap_sz));
ASSERT(!IS_MOVED_BOXED(ptr->thing_word));
- if (ptr->thing_word == HEADER_PROC_BIN) {
+ switch (ptr->thing_word) {
+ case HEADER_PROC_BIN:
BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
link_live_proc_bin(&shrink, &prev, &ptr, 0);
- }
- else {
- ASSERT(is_fun_header(ptr->thing_word) ||
- is_external_header(ptr->thing_word));
- prev = &ptr->next;
- ptr = ptr->next;
+ break;
+ case ERTS_USED_MAGIC_REF_THING_HEADER__:
+ ASSERT(is_magic_ref_thing(ptr));
+ BIN_OLD_VHEAP(p) +=
+ (((Uint) ((ErtsMRefThing *) ptr)->mb->orig_size)
+ / sizeof(Eterm)); /* for binary gc (words)*/
+ /* fall through... */
+ default:
+ ASSERT(is_fun_header(ptr->thing_word) ||
+ is_external_header(ptr->thing_word)
+ || is_magic_ref_thing(ptr));
+ prev = &ptr->next;
+ ptr = ptr->next;
+ break;
}
}
@@ -2834,6 +3036,9 @@ offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
}
tari = thing_arityval(val);
switch (thing_subtag(val)) {
+ case REF_SUBTAG:
+ if (is_ordinary_ref_thing(hp))
+ break;
case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
case EXTERNAL_PID_SUBTAG:
@@ -2903,52 +3108,62 @@ offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
}
}
+#ifndef USE_VM_PROBES
+#define ERTS_OFFSET_DT_UTAG(MP, A, ASZ, O)
+#else
+#define ERTS_OFFSET_DT_UTAG(MP, A, ASZ, O) \
+ do { \
+ Eterm utag__ = ERL_MESSAGE_DT_UTAG((MP)); \
+ if (is_boxed(utag__) && ErtsInArea(ptr_val(utag__), (A), (ASZ))) { \
+ ERL_MESSAGE_DT_UTAG((MP)) = offset_ptr(utag__, (O)); \
+ } \
+ } while (0)
+#endif
+
+static ERTS_INLINE void
+offset_message(ErtsMessage *mp, Sint offs, char* area, Uint area_size)
+{
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (ERTS_SIG_IS_MSG_TAG(mesg)) {
+ if (ERTS_SIG_IS_INTERNAL_MSG_TAG(mesg)) {
+ switch (primary_tag(mesg)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (ErtsInArea(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TERM(mp) = offset_ptr(mesg, offs);
+ }
+ break;
+ }
+ }
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ if (is_boxed(mesg) && ErtsInArea(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
+ }
+
+ ERTS_OFFSET_DT_UTAG(mp, area, area_size, offs);
+
+ ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
+ is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
+ is_atom(ERL_MESSAGE_TOKEN(mp))));
+ }
+}
+
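
offset_message() above relies on the same relocation step as the rest of the offset_*() family: after the heap block moves, every pointer into the old block is shifted by a fixed distance. An untagged, byte-based model (the real code works on tagged Eterm words and checks the tag first):

    #include <stddef.h>

    static void offset_ptrs(void **v, size_t n,
                            char *old_area, size_t area_size, ptrdiff_t offs)
    {
        size_t i;
        for (i = 0; i < n; i++) {
            char *p = (char *) v[i];
            if (old_area <= p && p < old_area + area_size)
                v[i] = p + offs;   /* relocate into the moved block */
        }
    }
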
/*
* Offset pointers in message queue.
*/
static void
offset_mqueue(Process *p, Sint offs, char* area, Uint area_size)
{
- ErtsMessage* mp = p->msg.first;
-
- if ((p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) != F_OFF_HEAP_MSGQ) {
-
- while (mp != NULL) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg)) {
- switch (primary_tag(mesg)) {
- case TAG_PRIMARY_LIST:
- case TAG_PRIMARY_BOXED:
- if (ErtsInArea(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_TERM(mp) = offset_ptr(mesg, offs);
- }
- break;
- }
- }
- mesg = ERL_MESSAGE_TOKEN(mp);
- if (is_boxed(mesg) && ErtsInArea(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
- }
-#ifdef USE_VM_PROBES
- mesg = ERL_MESSAGE_DT_UTAG(mp);
- if (is_boxed(mesg) && ErtsInArea(ptr_val(mesg), area, area_size)) {
- ERL_MESSAGE_DT_UTAG(mp) = offset_ptr(mesg, offs);
- }
-#endif
-
- ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
- is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
- is_atom(ERL_MESSAGE_TOKEN(mp))));
- mp = mp->next;
- }
-
- }
+ if ((p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) != F_OFF_HEAP_MSGQ)
+ ERTS_FOREACH_SIG_PRIVQS(p, mp, offset_message(mp, offs, area, area_size));
}
static void ERTS_INLINE
offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
- Eterm* objv, int nobj)
+ Eterm* objv, int nobj)
{
+ Eterm *v;
+ Uint sz;
if (p->dictionary) {
offset_heap(ERTS_PD_START(p->dictionary),
ERTS_PD_SIZE(p->dictionary),
@@ -2969,6 +3184,8 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_heap_ptr(objv, nobj, offs, area, area_size);
}
offset_off_heap(p, offs, area, area_size);
+ if (erts_setup_nif_export_rootset(p, &v, &sz))
+ offset_heap_ptr(v, sz, offs, area, area_size);
}
static void
@@ -3006,6 +3223,16 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+ /*
+ * Fold the dirty schedulers' info into the reply
+ * on the requesting scheduler
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
sz = 0;
hpp = NULL;
@@ -3015,7 +3242,7 @@ reply_gc_info(void *vgcirp)
if (hpp)
ref_copy = STORE_NC(hpp, ohp, gcirp->ref);
else
- *szp += REF_THING_SIZE;
+ *szp += ERTS_REF_THING_SIZE;
msg = erts_bld_tuple(hpp, szp, 3,
make_small(esdp->no),
@@ -3038,14 +3265,46 @@ reply_gc_info(void *vgcirp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&gcirp->refc) == 0)
gcireq_free(vgcirp);
}
+Eterm* erts_sub_binary_to_heap_binary(Eterm *ptr, Eterm **hpp, Eterm *orig) {
+ Eterm *htop = *hpp;
+ Eterm gval;
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ ErlHeapBin *hb = (ErlHeapBin *)htop;
+ Eterm *real_bin;
+ byte *bs;
+
+ real_bin = binary_val(follow_moved(sb->orig, (Eterm)0));
+
+ if (*real_bin == HEADER_PROC_BIN) {
+ bs = ((ProcBin *) real_bin)->bytes + sb->offs;
+ } else {
+ bs = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + sb->offs;
+ }
+
+ hb->thing_word = header_heap_bin(sb->size);
+ hb->size = sb->size;
+ sys_memcpy((byte *)hb->data, bs, sb->size);
+
+ gval = make_boxed(htop);
+ *orig = gval;
+ *ptr = gval;
+
+ ptr += ERL_SUB_BIN_SIZE;
+ htop += heap_bin_size(sb->size);
+
+ *hpp = htop;
+ return ptr;
+}
+
+
Eterm
erts_gc_info_request(Process *c_p)
{
@@ -3061,18 +3320,16 @@ erts_gc_info_request(Process *c_p)
gcirp->proc = c_p;
gcirp->ref = STORE_NC(&hp, NULL, ref);
gcirp->req_sched = esdp->no;
- erts_smp_atomic32_init_nob(&gcirp->refc,
+ erts_atomic32_init_nob(&gcirp->refc,
(erts_aint32_t) erts_no_schedulers);
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_gc_info,
(void *) gcirp);
-#endif
reply_gc_info((void *) gcirp);
@@ -3120,7 +3377,6 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp,
};
Eterm res = THE_NON_VALUE;
- ErtsMessage *mp;
ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(*tags));
ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == ERTS_PROCESS_GC_INFO_MAX_TERMS);
@@ -3139,9 +3395,15 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp,
be the same as adding old_heap_block_size + heap_block_size
+ mbuf_size.
*/
- for (mp = p->msg.first; mp; mp = mp->next)
- if (mp->data.attached)
- values[2] += erts_msg_attached_data_size(mp);
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp)
+ && mp->data.attached
+ && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ values[2] += erts_msg_attached_data_size(mp);
+ }
+ });
}
res = erts_bld_atom_uword_2tup_list(hpp,
@@ -3215,7 +3477,7 @@ erts_max_heap_size_map(Sint max_heap_size, Uint max_heap_flags,
Eterm **hpp, Uint *sz)
{
if (!hpp) {
- *sz += (2*3 + 1 + MAP_HEADER_FLATMAP_SZ);
+ *sz += ERTS_MAX_HEAP_SIZE_MAP_SZ;
return THE_NON_VALUE;
} else {
Eterm *hp = *hpp;
@@ -3279,20 +3541,22 @@ erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags)
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-static int
-within2(Eterm *ptr, Process *p, Eterm *real_htop)
+int
+erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm *real_htop)
{
ErlHeapFragment* bp;
ErtsMessage* mp;
Eterm *htop, *heap;
- if (p->abandoned_heap)
+ if (p->abandoned_heap) {
ERTS_GET_ORIG_HEAP(p, heap, htop);
- else {
- heap = p->heap;
- htop = real_htop ? real_htop : HEAP_TOP(p);
+ if (heap <= ptr && ptr < htop)
+ return 1;
}
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
+
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
@@ -3324,12 +3588,6 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
return 0;
}
-int
-within(Eterm *ptr, Process *p)
-{
- return within2(ptr, p, NULL);
-}
-
#endif
#ifdef ERTS_OFFHEAP_DEBUG
@@ -3360,7 +3618,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- refc = erts_refc_read(&u.pb->val->refc, 1);
+ refc = erts_refc_read(&u.pb->val->intern.refc, 1);
break;
case FUN_SUBTAG:
refc = erts_refc_read(&u.fun->fe->refc, 1);
@@ -3370,6 +3628,10 @@ erts_check_off_heap2(Process *p, Eterm *htop)
case EXTERNAL_REF_SUBTAG:
refc = erts_refc_read(&u.ext->node->refc, 1);
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ refc = erts_refc_read(&u.mref->mb->intern.refc, 1);
+ break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
}
@@ -3384,7 +3646,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
else if (oheap <= u.ep && u.ep < ohtop)
old = 1;
else {
- ERTS_CHK_OFFHEAP_ASSERT(within2(u.ep, p, htop));
+ ERTS_CHK_OFFHEAP_ASSERT(erts_dbg_within_proc(u.ep, p, htop));
}
}
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 54ea9ca3c0..b9b1ed728c 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,52 +25,74 @@
/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
-#include "erl_map.h"
+#define ERTS_POTENTIALLY_LONG_GC_HSIZE (128*1024) /* Words */
-#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
-# define HARDDEBUG 1
-#endif
+#include "erl_map.h"
+#include "erl_fun.h"
+#include "erl_bits.h"
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
+Eterm* erts_sub_binary_to_heap_binary(Eterm *ptr, Eterm **hpp, Eterm *orig);
-#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- \
- HTOP[0] = CAR; /* copy car */ \
- HTOP[1] = PTR[1]; /* copy cdr */ \
- gval = make_list(HTOP); /* new location */ \
- *ORIG = gval; /* redirect original reference */ \
- PTR[0] = THE_NON_VALUE; /* store forwarding indicator */ \
- PTR[1] = gval; /* store forwarding address */ \
- HTOP += 2; /* update tospace htop */ \
-} while(0)
-
-#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
-do { \
- Eterm gval; \
- Sint nelts; \
- \
- ASSERT(is_header(HDR)); \
- nelts = header_arity(HDR); \
- switch ((HDR) & _HEADER_SUBTAG_MASK) { \
- case SUB_BINARY_SUBTAG: nelts++; break; \
- case MAP_SUBTAG: \
- if (is_flatmap_header(HDR)) nelts+=flatmap_get_size(PTR) + 1; \
- else nelts += hashmap_bitcount(MAP_HEADER_VAL(HDR)); \
- break; \
- case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR))->num_free+1; break; \
- } \
- gval = make_boxed(HTOP); \
- *ORIG = gval; \
- *HTOP++ = HDR; \
- *PTR++ = gval; \
- while (nelts--) *HTOP++ = *PTR++; \
-} while(0)
+ERTS_GLB_INLINE void move_cons(Eterm *ERTS_RESTRICT ptr, Eterm car, Eterm **hpp,
+ Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void move_cons(Eterm *ERTS_RESTRICT ptr, Eterm car, Eterm **hpp,
+ Eterm *orig)
+{
+ Eterm *ERTS_RESTRICT htop = *hpp;
+ Eterm gval;
+
+ htop[0] = car; /* copy car */
+ htop[1] = ptr[1]; /* copy cdr */
+ gval = make_list(htop); /* new location */
+ *orig = gval; /* redirect original reference */
+ ptr[0] = THE_NON_VALUE; /* store forwarding indicator */
+ ptr[1] = gval; /* store forwarding address */
+ *hpp += 2; /* update tospace htop */
+}
+#endif
-#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
-int within(Eterm *ptr, Process *p);
+ERTS_GLB_INLINE Eterm* move_boxed(Eterm *ERTS_RESTRICT ptr, Eterm hdr, Eterm **hpp,
+ Eterm *orig);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Eterm* move_boxed(Eterm *ERTS_RESTRICT ptr, Eterm hdr, Eterm **hpp,
+ Eterm *orig)
+{
+ Eterm gval;
+ Sint nelts;
+ Eterm *ERTS_RESTRICT htop = *hpp;
+
+ ASSERT(is_header(hdr));
+ nelts = header_arity(hdr);
+ switch ((hdr) & _HEADER_SUBTAG_MASK) {
+ case SUB_BINARY_SUBTAG:
+ {
+ ErlSubBin *sb = (ErlSubBin *)ptr;
+ /* convert sub-binary to heap-binary if applicable */
+ if (sb->bitsize == 0 && sb->bitoffs == 0 &&
+ sb->is_writable == 0 && sb->size <= sizeof(Eterm) * 3) {
+ return erts_sub_binary_to_heap_binary(ptr, hpp, orig);
+ }
+ }
+ nelts++;
+ break;
+ case MAP_SUBTAG:
+ if (is_flatmap_header(hdr)) nelts+=flatmap_get_size(ptr) + 1;
+ else nelts += hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ break;
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(ptr))->num_free+1; break;
+ }
+ gval = make_boxed(htop);
+ *orig = gval;
+ *htop++ = hdr;
+ *ptr++ = gval;
+ while (nelts--) *htop++ = *ptr++;
+
+ *hpp = htop;
+ return ptr;
+}
#endif
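Together, move_cons() and move_boxed() define the forwarding protocol the rest of the collector depends on: a moved cons cell carries THE_NON_VALUE in its car and the forwarding address in its cdr, while a moved boxed term has its header word overwritten with the new tagged pointer; this is exactly what IS_MOVED_CONS/IS_MOVED_BOXED test for. A hedged sketch of how a scanning pass consumes the protocol (follow_moved() in erl_gc.c plays this role; the name below is illustrative):

/* Illustrative only: resolve a possibly-forwarded term. */
static Eterm resolve_forwarded(Eterm val)
{
    if (is_list(val)) {
        Eterm *ptr = list_val(val);
        if (IS_MOVED_CONS(ptr[0]))   /* car == THE_NON_VALUE */
            return ptr[1];           /* cdr holds the new location */
    } else if (is_boxed(val)) {
        Eterm *ptr = boxed_val(val);
        if (IS_MOVED_BOXED(ptr[0]))  /* header replaced by tagged pointer */
            return ptr[0];
    }
    return val;
}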
#define ErtsInYoungGen(TPtr, Ptr, OldHeap, OldHeapSz) \
@@ -132,6 +154,8 @@ typedef struct {
Uint64 garbage_cols;
} ErtsGCInfo;
+#define ERTS_MAX_HEAP_SIZE_MAP_SZ (2*3 + 1 + MAP_HEADER_FLATMAP_SZ)
+
#define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of elements in process_gc_info*/
#define ERTS_PROCESS_GC_INFO_MAX_SIZE \
(ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE))
@@ -145,9 +169,10 @@ void erts_garbage_collect_hibernate(struct process* p);
Eterm erts_gc_after_bif_call_lhf(struct process* p, ErlHeapFragment *live_hf_end,
Eterm result, Eterm* regs, Uint arity);
Eterm erts_gc_after_bif_call(struct process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(struct process* p, Eterm* literals,
- Uint lit_size,
- struct erl_off_heap_header* oh);
+int erts_garbage_collect_literals(struct process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh,
+ int fcalls);
Uint erts_next_heap_size(Uint, Uint);
Eterm erts_heap_sizes(struct process* p);
@@ -157,5 +182,11 @@ void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
+void erts_deallocate_young_generation(Process *c_p);
+void erts_copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap,
+ ErlHeapFragment *bp, Eterm *refs, int nrefs);
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+int erts_dbg_within_proc(Eterm *ptr, Process *p, Eterm* real_htop);
+#endif
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index 223ba193da..01d4aa54ff 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -168,8 +168,9 @@ static Block_t * get_free_block (Allctr_t *, Uint,
static void link_free_block (Allctr_t *, Block_t *);
static void unlink_free_block (Allctr_t *, Block_t *);
static void update_last_aux_mbc (Allctr_t *, Carrier_t *);
-static Eterm info_options (Allctr_t *, char *, int *,
+static Eterm info_options (Allctr_t *, char *, fmtfn_t *,
void *, Uint **, Uint *);
+static int gfalc_try_set_dyn_param(Allctr_t*, Eterm param, Uint value);
static void init_atoms (void);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -250,6 +251,8 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
if (!erts_alcu_start(allctr, init))
return NULL;
+ allctr->try_set_dyn_param = gfalc_try_set_dyn_param;
+
if (allctr->min_block_size != MIN_BLK_SZ)
return NULL;
@@ -505,7 +508,7 @@ static struct {
static void ERTS_INLINE atom_init(Eterm *atom, char *name)
{
- *atom = am_atom_put(name, strlen(name));
+ *atom = am_atom_put(name, sys_strlen(name));
}
#define AM_INIT(AM) atom_init(&am.AM, #AM)
@@ -551,7 +554,7 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
static Eterm
info_options(Allctr_t *allctr,
char *prefix,
- int *print_to_p,
+ fmtfn_t *print_to_p,
void *print_to_arg,
Uint **hpp,
Uint *szp)
@@ -584,6 +587,15 @@ info_options(Allctr_t *allctr,
return res;
}
+static int gfalc_try_set_dyn_param(Allctr_t* allctr, Eterm param, Uint value)
+{
+ if (param == am_sbct) {
+ /* Cannot change 'sbct' without rearranging buckets */
+ return 0;
+ }
+ return erts_alcu_try_set_dyn_param(allctr, param, value);
+}
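The hook follows the dynamic-parameter pattern from erl_alloc_util: each allocator may install try_set_dyn_param, veto parameters it cannot honor at runtime (here 'sbct', since goodfit's bucket layout is derived from it), and delegate everything else to the generic erts_alcu_try_set_dyn_param(). A hypothetical call site (the wrapper name is assumed, not from this patch):

/* Returns 0 when the allocator rejects the change. */
static int set_dyn_param(Allctr_t *allctr, Eterm param, Uint value)
{
    if (!allctr->try_set_dyn_param)
        return 0;
    return allctr->try_set_dyn_param(allctr, param, value);
}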
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* NOTE: erts_gfalc_test() is only supposed to be used for testing. *
* *
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index f1bef28186..ef7a55fa38 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,6 +29,8 @@
# include "config.h"
#endif
+/* #define ERTS_MAGIC_REF_BIF_TIMERS */
+
#include "sys.h"
#include "global.h"
#include "bif.h"
@@ -36,6 +38,10 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#include "erl_hl_timer.h"
+#include "erl_proc_sig_queue.h"
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#include "erl_binary.h"
+#endif
#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
@@ -91,24 +97,14 @@ typedef enum {
#define ERTS_BIF_TIMER_SHORT_TIME 5000
-#ifdef ERTS_SMP
-# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore \
- ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore)
-#else
-# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore
-#endif
-
-/* Bit 0 to 9 contains scheduler id (see mask below) */
-#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
-#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11)
-#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 12)
-#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 13)
-#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14)
-#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15)
-#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 17)
-#endif
+/* Bits 0 through 10 contain the scheduler id (see mask below) */
+#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11)
+#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 17)
#define ERTS_TMR_ROFLG_SID_MASK \
(ERTS_TMR_ROFLG_HLT - (Uint32) 1)
@@ -127,6 +123,13 @@ typedef struct ErtsHLTimer_ ErtsHLTimer;
#define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
+typedef struct ErtsBifTimer_ ErtsBifTimer;
+
+typedef struct {
+ ErtsBifTimer *next;
+ ErtsBifTimer *prev;
+} ErtsBifTimerList;
+
typedef struct {
UWord parent; /* parent pointer and flags... */
union {
@@ -144,78 +147,86 @@ typedef struct {
typedef struct {
UWord parent; /* parent pointer and flags... */
- ErtsHLTimer *right;
- ErtsHLTimer *left;
-} ErtsHLTimerTree;
+ ErtsBifTimer *right;
+ ErtsBifTimer *left;
+} ErtsBifTimerTree;
typedef struct {
Uint32 roflgs;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
union {
void *arg;
erts_atomic_t next;
} u;
+ union {
+ Process *proc;
+ Port *port;
+ Eterm name;
+ void (*callback)(void *);
+ } receiver;
} ErtsTmrHead;
struct ErtsHLTimer_ {
ErtsTmrHead head; /* NEED to be first! */
+ ErtsMonotonicTime timeout;
union {
ErtsThrPrgrLaterOp cleanup;
ErtsHLTimerTimeTree tree;
} time;
- ErtsMonotonicTime timeout;
- union {
- Process *proc;
- Port *port;
- Eterm name;
- void (*callback)(void *);
- } receiver;
#ifdef ERTS_HLT_HARD_DEBUG
int pending_timeout;
#endif
-
- erts_smp_atomic32_t state;
-
- /* BIF timer only fields follow... */
- struct {
- Uint32 refn[ERTS_REF_NUMBERS];
- ErtsHLTimerTree proc_tree;
- ErtsHLTimerTree tree;
- Eterm message;
- ErlHeapFragment *bp;
- } btm;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- struct {
- Eterm accessor;
- ErtsHLTimerTree tree;
- } abtm;
-#endif
};
-#define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm)
-#define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#else
-#define ERTS_BIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#endif
-
typedef struct {
ErtsTmrHead head; /* NEED to be first! */
union {
- void *p;
- void (*callback)(void *);
+ ErtsTWheelTimer tw_tmr;
+ ErtsThrPrgrLaterOp cleanup;
} u;
- ErtsTWheelTimer tw_tmr;
} ErtsTWTimer;
+struct ErtsBifTimer_ {
+ union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+ } type;
+ struct {
+ erts_atomic32_t state;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin;
+ ErtsHLTimerList proc_list;
+#else
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsBifTimerTree proc_tree;
+ ErtsBifTimerTree tree;
+#endif
+ Eterm message;
+ ErlHeapFragment *bp;
+ } btm;
+};
+
typedef union {
ErtsTmrHead head;
ErtsHLTimer hlt;
ErtsTWTimer twt;
+ ErtsBifTimer btm;
} ErtsTimer;
+typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg);
+
#ifdef SMALL_MEMORY
#define BIF_TIMER_PREALC_SZ 10
#define PTIMER_PREALC_SZ 10
@@ -225,7 +236,7 @@ typedef union {
#endif
ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
- ErtsHLTimer,
+ ErtsBifTimer,
BIF_TIMER_PREALC_SZ)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
@@ -252,7 +263,6 @@ typedef struct {
erts_atomic_t last;
} ErtsHLTCncldTmrQTail;
-#ifdef ERTS_SMP
typedef struct {
/*
@@ -284,7 +294,6 @@ typedef struct {
} head;
} ErtsHLTCncldTmrQ;
-#endif /* ERTS_SMP */
typedef struct {
ErtsHLTimer *root;
@@ -292,16 +301,18 @@ typedef struct {
} ErtsYieldingTimeoutState;
struct ErtsHLTimerService_ {
-#ifdef ERTS_SMP
ErtsHLTCncldTmrQ canceled_queue;
-#endif
ErtsHLTimer *time_tree;
- ErtsHLTimer *btm_tree;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsBifTimer *btm_tree;
+#endif
ErtsHLTimer *next_timeout;
ErtsYieldingTimeoutState yield;
ErtsTWheelTimer service_timer;
};
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
static ERTS_INLINE int
refn_is_lt(Uint32 *x, Uint32 *y)
{
@@ -317,6 +328,14 @@ refn_is_lt(Uint32 *x, Uint32 *y)
return x[0] < y[0];
}
+static ERTS_INLINE int
+refn_is_eq(Uint32 *x, Uint32 *y)
+{
+ return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
+}
+
+#endif
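Side note: the bitwise & in refn_is_eq() is deliberate. Each comparison already yields 0 or 1, so & gives the same result as && while evaluating all three words unconditionally, keeping the common three-word reference comparison branch-free. The short-circuiting equivalent, for contrast (illustrative only):

static int refn_is_eq_branchy(Uint32 *x, Uint32 *y)
{
    return x[0] == y[0] && x[1] == y[1] && x[2] == y[2];
}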
+
#define ERTS_RBT_PREFIX time
#define ERTS_RBT_T ErtsHLTimer
#define ERTS_RBT_KEY_T ErtsMonotonicTime
@@ -506,8 +525,16 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#endif /* ERTS_HLT_HARD_DEBUG */
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.mbin->refn)
+#else
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
+#endif
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
#define ERTS_RBT_PREFIX btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -533,7 +560,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -544,20 +571,94 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static ERTS_INLINE void
+proc_btm_list_insert(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (!y) {
+ x->btm.proc_list.next = x;
+ x->btm.proc_list.prev = x;
+ *list = x;
+ }
+ else {
+ ERTS_HLT_ASSERT(y->btm.proc_list.prev->btm.proc_list.next == y);
+ x->btm.proc_list.next = y;
+ x->btm.proc_list.prev = y->btm.proc_list.prev;
+ y->btm.proc_list.prev->btm.proc_list.next = x;
+ y->btm.proc_list.prev = x;
+ }
+}
+
+static ERTS_INLINE void
+proc_btm_list_delete(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (y == x && x->btm.proc_list.next == x) {
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev == x);
+ *list = NULL;
+ }
+ else {
+ if (y == x)
+ *list = x->btm.proc_list.next;
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev->btm.proc_list.next == x);
+ ERTS_HLT_ASSERT(x->btm.proc_list.next->btm.proc_list.prev == x);
+ x->btm.proc_list.prev->btm.proc_list.next = x->btm.proc_list.next;
+ x->btm.proc_list.next->btm.proc_list.prev = x->btm.proc_list.prev;
+ }
+ x->btm.proc_list.next = NULL;
+}
+
+static ERTS_INLINE int
+proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
+ void (*destroy)(ErtsBifTimer *, void *),
+ void *arg,
+ int limit)
+{
+ int i;
+ ErtsBifTimer *first, *last;
+
+ first = *list;
+ if (!first)
+ return 0;
+
+ last = first->btm.proc_list.prev;
+ for (i = 0; i < limit; i++) {
+ ErtsBifTimer *x = last;
+ last = last->btm.proc_list.prev;
+ (*destroy)(x, arg);
+ x->btm.proc_list.next = NULL;
+ if (x == first) {
+ *list = NULL;
+ return 0;
+ }
+ }
+
+ last->btm.proc_list.next = first;
+ first->btm.proc_list.prev = last;
+ return 1;
+}
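These three helpers implement the per-process BIF-timer list used in the ERTS_MAGIC_REF_BIF_TIMERS configuration: a circular doubly-linked list whose head pointer is NULL when empty, with next == NULL marking a node as off-list, and a yielding destructor that consumes at most 'limit' nodes per call from the tail. A minimal mirror of the link/unlink logic on a generic node type, to make the invariants explicit (hypothetical types, not from the patch):

typedef struct node { struct node *next, *prev; } node;

/* Insert x just before the head, i.e. at the tail of the cycle. */
static void cyc_insert(node **list, node *x)
{
    node *y = *list;
    if (!y) {
        x->next = x->prev = x;      /* singleton cycle */
        *list = x;
    } else {
        x->next = y;
        x->prev = y->prev;
        y->prev->next = x;
        y->prev = x;
    }
}

static void cyc_delete(node **list, node *x)
{
    if (*list == x && x->next == x) /* last element */
        *list = NULL;
    else {
        if (*list == x)
            *list = x->next;
        x->prev->next = x->next;
        x->next->prev = x->prev;
    }
    x->next = NULL;                 /* off-list marker, as in the patch */
}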
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
#define ERTS_RBT_PREFIX proc_btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -583,7 +684,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.proc_tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -594,75 +695,22 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
-#define ERTS_RBT_WANT_DELETE
-#define ERTS_RBT_WANT_INSERT
-#define ERTS_RBT_WANT_LOOKUP
-#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
-#define ERTS_RBT_UNDEF
-
-#include "erl_rbtree.h"
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-#define ERTS_RBT_PREFIX abtm
-#define ERTS_RBT_T ErtsHLTimer
-#define ERTS_RBT_KEY_T Uint32 *
-#define ERTS_RBT_FLAGS_T UWord
-#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
- do { \
- (T)->abtm.tree.parent = (UWord) NULL; \
- (T)->abtm.tree.right = NULL; \
- (T)->abtm.tree.left = NULL; \
- } while (0)
-#define ERTS_RBT_IS_RED(T) \
- ((int) ((T)->abtm.tree.parent & ERTS_HLT_PFLG_RED))
-#define ERTS_RBT_SET_RED(T) \
- ((T)->abtm.tree.parent |= ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_IS_BLACK(T) \
- (!ERTS_RBT_IS_RED((T)))
-#define ERTS_RBT_SET_BLACK(T) \
- ((T)->abtm.tree.parent &= ~ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_GET_FLAGS(T) \
- ((T)->abtm.tree.parent & ERTS_HLT_PFLGS_MASK)
-#define ERTS_RBT_SET_FLAGS(T, F) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (F); \
- } while (0)
-#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->abtm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
-#define ERTS_RBT_SET_PARENT(T, P) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (UWord) (P); \
- } while (0)
-#define ERTS_RBT_GET_RIGHT(T) ((T)->abtm.tree.right)
-#define ERTS_RBT_SET_RIGHT(T, R) ((T)->abtm.tree.right = (R))
-#define ERTS_RBT_GET_LEFT(T) ((T)->abtm.tree.left)
-#define ERTS_RBT_SET_LEFT(T, L) ((T)->abtm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
-#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
-#ifdef ERTS_SMP
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
-#endif
void
erts_hl_timer_init(void)
@@ -680,14 +728,14 @@ erts_create_timer_service(void)
srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
sizeof(ErtsHLTimerService));
srv->time_tree = NULL;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
srv->btm_tree = NULL;
+#endif
srv->next_timeout = NULL;
srv->yield = init_yield;
erts_twheel_init_timer(&srv->service_timer);
-#ifdef ERTS_SMP
init_canceled_queue(&srv->canceled_queue);
-#endif
return srv;
}
@@ -697,11 +745,8 @@ erts_timer_type_size(ErtsAlcType_t type)
{
switch (type) {
case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
- case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE;
- case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE;
-#endif
+ case ERTS_ALC_T_HL_PTIMER: return sizeof(ErtsHLTimer);
+ case ERTS_ALC_T_BIF_TIMER: return sizeof(ErtsBifTimer);
default: ERTS_INTERNAL_ERROR("Unknown type");
}
return 0;
@@ -732,13 +777,13 @@ get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos)
static ERTS_INLINE int
proc_timeout_common(Process *proc, void *tmr)
{
- if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer,
+ if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer,
ERTS_PTMR_TIMEDOUT,
(erts_aint_t) tmr)) {
erts_aint32_t state;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
- state = erts_smp_atomic32_read_acqb(&proc->state);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ state = erts_atomic32_read_acqb(&proc->state);
+ erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
erts_schedule_process(proc, state, 0);
return 1;
@@ -749,7 +794,7 @@ proc_timeout_common(Process *proc, void *tmr)
static ERTS_INLINE int
port_timeout_common(Port *port, void *tmr)
{
- if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&port->common.timer,
+ if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer,
ERTS_PTMR_TIMEDOUT,
(erts_aint_t) tmr)) {
erts_port_task_schedule(port->common.id,
@@ -760,6 +805,111 @@ port_timeout_common(Port *port, void *tmr)
return 0;
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static erts_atomic_t *
+mbin_to_btmref__(ErtsMagicBinary *mbin)
+{
+ return erts_binary_to_magic_indirection((Binary *) mbin);
+}
+
+static ERTS_INLINE void
+magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+ erts_atomic_init_nob(aptr, (erts_aint_t) tmr);
+}
+
+static ERTS_INLINE ErtsBifTimer *
+magic_binary_to_btm(ErtsMagicBinary *mbin)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+ ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr);
+ ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
+ return tmr;
+}
+
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE erts_aint_t
+init_btm_specifics(ErtsSchedulerData *esdp,
+ ErtsBifTimer *tmr, Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin
+#else
+ Uint32 *refn
+#endif
+ )
+{
+ Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
+ int refc;
+ if (!hsz) {
+ tmr->btm.message = msg;
+ tmr->btm.bp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(hsz);
+ Eterm *hp = bp->mem;
+ tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
+ tmr->btm.bp = bp;
+ }
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ refc = 1;
+ tmr->btm.mbin = mbin;
+ erts_refc_inc(&mbin->refc, 1);
+ magic_binary_init(mbin, tmr);
+ tmr->btm.proc_list.next = NULL;
+#else
+ refc = 0;
+ tmr->btm.refn[0] = refn[0];
+ tmr->btm.refn[1] = refn[1];
+ tmr->btm.refn[2] = refn[2];
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
+#endif
+
+ erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
+ return refc; /* refc from magic binary... */
+}
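The message handling at the top of init_btm_specifics() is the usual capture pattern: an immediate term is stored as-is (there is no heap data to own), anything else is deep-copied into a heap fragment that travels with the timer until delivery or cancellation. Distilled into a standalone helper (same calls as above; the wrapper name is illustrative):

static Eterm capture_message(Eterm msg, ErlHeapFragment **bpp)
{
    Uint hsz = is_immed(msg) ? 0 : size_object(msg);
    if (!hsz) {
        *bpp = NULL;              /* immediate: nothing to copy */
        return msg;
    } else {
        ErlHeapFragment *bp = new_message_buffer(hsz);
        Eterm *hp = bp->mem;
        *bpp = bp;
        return copy_struct(msg, hsz, &hp, &bp->off_heap);
    }
}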
+
+static void tw_bif_timer_timeout(void *vbtmp);
+
+static ERTS_INLINE void
+timer_destroy(ErtsTimer *tmr, int twt, int btm)
+{
+ if (!btm) {
+ if (twt)
+ tw_timer_free(&tmr->twt);
+ else
+ erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
+ }
+ else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *bp = (Binary *) tmr->btm.btm.mbin;
+ if (erts_refc_dectest(&bp->refc, 0) == 0)
+ erts_bin_free(bp);
+#endif
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
+ bif_timer_pre_free(&tmr->btm);
+ else
+ erts_free(ERTS_ALC_T_BIF_TIMER, &tmr->btm);
+ }
+}
+
+static ERTS_INLINE void
+timer_pre_dec_refc(ErtsTimer *tmr)
+{
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t refc;
+ refc = erts_atomic32_dec_read_nob(&tmr->head.refc);
+ ERTS_HLT_ASSERT(refc > 0);
+#else
+ erts_atomic32_dec_nob(&tmr->head.refc);
+#endif
+}
+
/*
* Basic timer wheel timer stuff
*/
@@ -767,33 +917,46 @@ port_timeout_common(Port *port, void *tmr)
static void
scheduled_tw_timer_destroy(void *vtmr)
{
- tw_timer_free((ErtsTWTimer *) vtmr);
+    ErtsTimer *tmr = (ErtsTimer *) vtmr;
+    int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+    timer_destroy(tmr, 1, btm);
}
static void
schedule_tw_timer_destroy(ErtsTWTimer *tmr)
{
+ Uint size;
/*
* Reference to process/port can be
* dropped at once...
*/
if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
- erts_proc_dec_refc((Process *) tmr->u.p);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
- erts_port_dec_refc((Port *) tmr->u.p);
+ erts_port_dec_refc(tmr->head.receiver.port);
+
+ if (!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
+ }
erts_schedule_thr_prgr_later_cleanup_op(
scheduled_tw_timer_destroy,
(void *) tmr,
- &tmr->tw_tmr.u.cleanup,
- sizeof(ErtsTWTimer));
+ &tmr->u.cleanup,
+ size);
}
static ERTS_INLINE void
tw_timer_dec_refc(ErtsTWTimer *tmr)
{
- if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
- ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+ if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
schedule_tw_timer_destroy(tmr);
}
}
@@ -802,7 +965,7 @@ static void
tw_proc_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Process *proc = (Process *) twtp->u.p;
+ Process *proc = twtp->head.receiver.proc;
if (proc_timeout_common(proc, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
@@ -812,127 +975,152 @@ static void
tw_port_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Port *port = (Port *) twtp->u.p;
+ Port *port = twtp->head.receiver.port;
if (port_timeout_common(port, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
}
static void
-tw_ptimer_cancel(void *vtwtp)
-{
- tw_timer_dec_refc((ErtsTWTimer *) vtwtp);
-}
-
-static void
cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
{
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
- erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr);
+ erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->u.tw_tmr);
+ tw_timer_dec_refc(tmr);
}
static void
tw_callback_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- void (*callback)(void *) = twtp->u.callback;
+ void (*callback)(void *) = twtp->head.receiver.callback;
void *arg = twtp->head.u.arg;
tw_timer_dec_refc(twtp);
(*callback)(arg);
}
-static ErtsTWTimer *
-create_tw_timer(ErtsSchedulerData *esdp,
- ErtsTmrType type, void *p,
- void (*callback)(void *), void *arg,
- ErtsMonotonicTime timeout_pos)
+static ErtsTimer *
+create_tw_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg)
{
ErtsTWTimer *tmr;
void (*timeout_func)(void *);
- void (*cancel_func)(void *);
erts_aint32_t refc;
- tmr = tw_timer_alloc();
- erts_twheel_init_timer(&tmr->tw_tmr);
-
- tmr->head.roflgs = (Uint32) esdp->no;
- ERTS_HLT_ASSERT((tmr->head.roflgs
- & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+ if (type != ERTS_TMR_BIF) {
+ tmr = tw_timer_alloc();
+ tmr->head.roflgs = 0;
+ }
+ else {
+ if (short_time) {
+ tmr = (ErtsTWTimer *) bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ tmr->head.roflgs = (ERTS_TMR_ROFLG_BIF_TMR
+ | ERTS_TMR_ROFLG_PRE_ALC);
+ }
+ else {
+ alloc_bif_timer:
+ tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
+ }
+ }
+
+ erts_twheel_init_timer(&tmr->u.tw_tmr);
+ tmr->head.roflgs |= (Uint32) esdp->no;
+ ERTS_HLT_ASSERT((((Uint32) esdp->no)
+ & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
switch (type) {
case ERTS_TMR_PROC:
- tmr->u.p = p;
+ tmr->head.receiver.proc = (Process *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
timeout_func = tw_proc_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_proc_inc_refc((Process *) p);
+ erts_proc_inc_refc((Process *) rcvrp);
refc = 2;
break;
case ERTS_TMR_PORT:
- tmr->u.p = p;
+ tmr->head.receiver.port = (Port *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
timeout_func = tw_port_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_port_inc_refc((Port *) p);
+ erts_port_inc_refc((Port *) rcvrp);
refc = 2;
break;
case ERTS_TMR_CALLBACK:
tmr->head.u.arg = arg;
- tmr->u.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
timeout_func = tw_callback_timeout;
- cancel_func = NULL;
refc = 1;
break;
+ case ERTS_TMR_BIF:
+
+ timeout_func = tw_bif_timer_timeout;
+ if (is_internal_pid(rcvr)) {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->head.receiver.name = (Eterm) rcvr;
+ refc = 1;
+ }
+
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
+#endif
+ );
+ break;
+
default:
ERTS_INTERNAL_ERROR("Unsupported timer type");
return NULL;
}
- erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
+ erts_atomic32_init_nob(&tmr->head.refc, refc);
erts_twheel_set_timer(esdp->timer_wheel,
- &tmr->tw_tmr,
+ &tmr->u.tw_tmr,
timeout_func,
- cancel_func,
tmr,
timeout_pos);
- return tmr;
+ return (ErtsTimer *) tmr;
}
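create_tw_timer() now also services ERTS_TMR_BIF, including a pre-allocation fast path for short timers. The allocation fallback that the goto above implements, distilled (a sketch using the same flags and allocator calls):

/* Short BIF timers try the per-scheduler pre-allocated pool first and
 * fall back to the regular allocator when the pool is exhausted; the
 * PRE_ALC flag records which free path timer_destroy() must take. */
ErtsTWTimer *tmr = short_time ? (ErtsTWTimer *) bif_timer_pre_alloc() : NULL;
if (tmr)
    tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR | ERTS_TMR_ROFLG_PRE_ALC;
else {
    tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
                                     sizeof(ErtsBifTimer));
    tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
}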
/*
* Basic high level timer stuff
*/
-static ERTS_INLINE void
-hl_timer_destroy(ErtsHLTimer *tmr)
-{
- Uint32 roflgs = tmr->head.roflgs;
- if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
- else {
- if (roflgs & ERTS_TMR_ROFLG_PRE_ALC)
- bif_timer_pre_free(tmr);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- erts_free(ERTS_ALC_T_ABIF_TIMER, tmr);
-#endif
- else
- erts_free(ERTS_ALC_T_BIF_TIMER, tmr);
- }
-}
-
static void
scheduled_hl_timer_destroy(void *vtmr)
{
- hl_timer_destroy((ErtsHLTimer *) vtmr);
+    ErtsTimer *tmr = (ErtsTimer *) vtmr;
+    int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+    timer_destroy(tmr, 0, btm);
}
static void
@@ -945,28 +1133,28 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
* at once...
*/
- ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0);
+ ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0);
if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
+ ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
}
else if (roflgs & ERTS_TMR_ROFLG_PROC) {
- ERTS_HLT_ASSERT(tmr->receiver.proc);
- erts_proc_dec_refc(tmr->receiver.proc);
+ ERTS_HLT_ASSERT(tmr->head.receiver.proc);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
}
else if (roflgs & ERTS_TMR_ROFLG_PORT) {
- ERTS_HLT_ASSERT(tmr->receiver.port);
- erts_port_dec_refc(tmr->receiver.port);
+ ERTS_HLT_ASSERT(tmr->head.receiver.port);
+ erts_port_dec_refc(tmr->head.receiver.port);
}
if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- size = ERTS_HL_PTIMER_SIZE;
- else {
- /*
- * Message buffer can be dropped at
- * once...
- */
size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
}
erts_schedule_thr_prgr_later_cleanup_op(
@@ -975,28 +1163,15 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
}
static ERTS_INLINE void
-hl_timer_pre_dec_refc(ErtsHLTimer *tmr)
-{
-#ifdef ERTS_HLT_DEBUG
- erts_aint_t refc;
- refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
- ERTS_HLT_ASSERT(refc > 0);
-#else
- erts_smp_atomic32_dec_nob(&tmr->head.refc);
-#endif
-}
-
-static ERTS_INLINE void
hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
{
- if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
- ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+ if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
schedule_hl_timer_destroy(tmr, roflgs);
}
}
static void hlt_service_timeout(void *vesdp);
-#ifdef ERTS_SMP
static void handle_canceled_queue(ErtsSchedulerData *esdp,
ErtsHLTCncldTmrQ *cq,
int use_limit,
@@ -1004,12 +1179,11 @@ static void handle_canceled_queue(ErtsSchedulerData *esdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *need_more_work);
-#endif
static ERTS_INLINE void
check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
{
-#if defined(ERTS_SMP) && ERTS_TMR_CHECK_CANCEL_ON_CREATE
+#if ERTS_TMR_CHECK_CANCEL_ON_CREATE
ErtsHLTCncldTmrQ *cq = &srv->canceled_queue;
if (cq->head.first != cq->head.unref_end)
handle_canceled_queue(esdp, cq, 1,
@@ -1018,39 +1192,136 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
#endif
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-static void
-hlt_delete_abtm(ErtsHLTimer *tmr)
+static int
+bif_timer_ref_destructor(Binary *unused)
{
- Process *proc;
+ return 1;
+}
- ERTS_HLT_ASSERT(tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR);
+static ERTS_INLINE void
+btm_clear_magic_binary(ErtsBifTimer *tmr)
+{
+ erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
+ Uint32 roflgs = tmr->type.head.roflgs;
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t tval = erts_atomic_xchg_nob(aptr,
+ (erts_aint_t) NULL);
+ ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
+#else
+ erts_atomic_set_nob(aptr, (erts_aint_t) NULL);
+#endif
+ if (roflgs & ERTS_TMR_ROFLG_HLT)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
+}
- proc = erts_proc_lookup(tmr->abtm.accessor);
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
- if (proc) {
- int deref = 0;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
- if (tmr->abtm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- abtm_rbt_delete(&proc->accessor_bif_timers, tmr);
- deref = 1;
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- if (deref)
- hl_timer_pre_dec_refc(tmr);
+static ERTS_INLINE void
+bif_timer_timeout(ErtsHLTimerService *srv,
+ ErtsBifTimer *tmr,
+ Uint32 roflgs)
+{
+ erts_aint32_t state;
+
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_TIMED_OUT,
+ ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
+ || state == ERTS_TMR_STATE_ACTIVE);
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ Process *proc;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ Eterm term;
+ term = tmr->type.head.receiver.name;
+ ERTS_HLT_ASSERT(is_atom(term));
+ term = erts_whereis_name_to_id(NULL, term);
+ proc = erts_proc_lookup(term);
+ }
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(proc);
+ }
+ if (proc) {
+ int dec_refc = 0;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = tmr->btm.bp;
+ tmr->btm.bp = NULL;
+ erts_queue_message(proc, 0, mp, tmr->btm.message,
+ am_clock_service);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /* If the process is exiting, do not disturb the cleanup... */
+ if (!ERTS_PROC_IS_EXITING(proc)) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ dec_refc = 1;
+ }
+#else
+ if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ dec_refc = 1;
+ }
+#endif
+ }
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (dec_refc)
+ timer_pre_dec_refc((ErtsTimer *) tmr);
+ }
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
}
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
}
+static void
+tw_bif_timer_timeout(void *vbtmp)
+{
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsHLTimerService *srv = NULL;
+#else
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsHLTimerService *srv = esdp->timer_service;
#endif
+ ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
+ bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
+ tw_timer_dec_refc(&btmp->type.twt);
+}
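bif_timer_timeout() and cancel_bif_timer() (further down) race on btm.state through a single compare-and-swap out of ERTS_TMR_STATE_ACTIVE; whichever side wins owns delivery, respectively cleanup, of the message buffer. The claim step distilled (the helper name is illustrative):

/* Nonzero iff the caller moved the timer out of ACTIVE and therefore
 * owns the subsequent work (timeout delivery or cancel cleanup). */
static int btm_claim(ErtsBifTimer *tmr, erts_aint32_t new_state)
{
    return erts_atomic32_cmpxchg_acqb(&tmr->btm.state, new_state,
                                      ERTS_TMR_STATE_ACTIVE)
        == ERTS_TMR_STATE_ACTIVE;
}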
-static ErtsHLTimer *
+static ErtsTimer *
create_hl_timer(ErtsSchedulerData *esdp,
ErtsMonotonicTime timeout_pos,
int short_time, ErtsTmrType type,
- void *rcvrp, Eterm rcvr, Eterm acsr,
- Eterm msg, Uint32 *refn,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
void (*callback)(void *), void *arg)
{
ErtsHLTimerService *srv = esdp->timer_service;
@@ -1069,7 +1340,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
if (type != ERTS_TMR_BIF) {
tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
- ERTS_HL_PTIMER_SIZE);
+ sizeof(ErtsHLTimer));
tmr->timeout = timeout_pos;
switch (type) {
@@ -1078,7 +1349,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(is_internal_pid(rcvr));
erts_proc_inc_refc((Process *) rcvrp);
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PROC;
refc = 2;
break;
@@ -1086,14 +1357,14 @@ create_hl_timer(ErtsSchedulerData *esdp,
case ERTS_TMR_PORT:
ERTS_HLT_ASSERT(is_internal_port(rcvr));
erts_port_inc_refc((Port *) rcvrp);
- tmr->receiver.port = (Port *) rcvrp;
+ tmr->head.receiver.port = (Port *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PORT;
refc = 2;
break;
case ERTS_TMR_CALLBACK:
roflgs |= ERTS_TMR_ROFLG_CALLBACK;
- tmr->receiver.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.u.arg = arg;
refc = 1;
break;
@@ -1105,84 +1376,47 @@ create_hl_timer(ErtsSchedulerData *esdp,
}
else { /* ERTS_TMR_BIF */
- Uint hsz;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- int is_abif_tmr = is_value(acsr) && acsr != rcvr;
-#endif
if (short_time) {
- tmr = bif_timer_pre_alloc();
+ tmr = (ErtsHLTimer *) bif_timer_pre_alloc();
if (!tmr)
goto alloc_bif_timer;
roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
}
else {
alloc_bif_timer:
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr)
- tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
- ERTS_ABIF_TIMER_SIZE);
- else
-#endif
- tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
- ERTS_BIF_TIMER_SIZE);
- }
+ tmr = (ErtsHLTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ }
tmr->timeout = timeout_pos;
roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
if (is_internal_pid(rcvr)) {
roflgs |= ERTS_TMR_ROFLG_PROC;
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
refc = 2;
}
else {
ERTS_HLT_ASSERT(is_atom(rcvr));
roflgs |= ERTS_TMR_ROFLG_REG_NAME;
- tmr->receiver.name = rcvr;
+ tmr->head.receiver.name = rcvr;
refc = 1;
}
- hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
- if (!hsz) {
- tmr->btm.message = msg;
- tmr->btm.bp = NULL;
- }
- else {
- ErlHeapFragment *bp = new_message_buffer(hsz);
- Eterm *hp = bp->mem;
- tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
- tmr->btm.bp = bp;
- }
- tmr->btm.refn[0] = refn[0];
- tmr->btm.refn[1] = refn[1];
- tmr->btm.refn[2] = refn[2];
-
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr) {
- Process *aproc;
- roflgs |= ERTS_TMR_ROFLG_ABIF_TMR;
- tmr->abtm.accessor = acsr;
- aproc = erts_proc_lookup(acsr);
- if (!aproc)
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- else {
- refc++;
- erts_smp_proc_lock(aproc, ERTS_PROC_LOCK_BTM);
- abtm_rbt_insert(&aproc->accessor_bif_timers, tmr);
- erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM);
- }
- }
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
#endif
-
- btm_rbt_insert(&srv->btm_tree, tmr);
+ );
}
tmr->head.roflgs = roflgs;
- erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
- erts_smp_atomic32_init_nob(&tmr->state, ERTS_TMR_STATE_ACTIVE);
+ erts_atomic32_init_nob(&tmr->head.refc, refc);
if (!srv->next_timeout
|| tmr->timeout < srv->next_timeout->timeout) {
@@ -1192,7 +1426,6 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
tmr->timeout);
srv->next_timeout = tmr;
@@ -1209,79 +1442,20 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_HDBG_CHK_SRV(srv);
- return tmr;
-}
-
-static ERTS_INLINE void
-hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
-{
- ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
- Process *proc;
- int queued_message = 0;
- int dec_refc = 0;
- Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME);
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
-
- if (is_reg_name) {
- Eterm pid;
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
- pid = erts_whereis_name_to_id(NULL, tmr->receiver.name);
- proc = erts_proc_lookup(pid);
- }
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
- ERTS_HLT_ASSERT(tmr->receiver.proc);
-
- proc = tmr->receiver.proc;
- proc_locks |= ERTS_PROC_LOCK_BTM;
- }
- if (proc) {
- erts_smp_proc_lock(proc, proc_locks);
- /*
- * If process is exiting, let it clean up
- * the btm tree by itself (it may be in
- * the middle of tree destruction).
- */
- if (!ERTS_PROC_IS_EXITING(proc)) {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = tmr->btm.bp;
- erts_queue_message(proc, proc_locks, mp,
- tmr->btm.message, am_clock_service);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
- queued_message = 1;
- proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
- tmr->btm.bp = NULL;
- if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- proc_btm_rbt_delete(&proc->bif_timers, tmr);
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- dec_refc = 1;
- }
- }
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- if (dec_refc)
- hl_timer_pre_dec_refc(tmr);
- }
- if (!queued_message && tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
+ return (ErtsTimer *) tmr;
}
static ERTS_INLINE void
hlt_proc_timeout(ErtsHLTimer *tmr)
{
- if (proc_timeout_common(tmr->receiver.proc, (void *) tmr))
+ if (proc_timeout_common(tmr->head.receiver.proc, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
static ERTS_INLINE void
hlt_port_timeout(ErtsHLTimer *tmr)
{
- if (port_timeout_common(tmr->receiver.port, (void *) tmr))
+ if (port_timeout_common(tmr->head.receiver.port, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
@@ -1289,41 +1463,24 @@ static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
{
ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
Uint32 roflgs;
- erts_aint32_t state;
ERTS_HLT_HDBG_CHK_SRV(srv);
roflgs = tmr->head.roflgs;
ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
- ERTS_TMR_STATE_TIMED_OUT,
- ERTS_TMR_STATE_ACTIVE);
-
- ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
- || state == ERTS_TMR_STATE_ACTIVE);
-
- if (state == ERTS_TMR_STATE_ACTIVE) {
-
- if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- hlt_bif_timer_timeout(tmr, roflgs);
- else if (roflgs & ERTS_TMR_ROFLG_PROC)
- hlt_proc_timeout(tmr);
- else if (roflgs & ERTS_TMR_ROFLG_PORT)
- hlt_port_timeout(tmr);
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
- (*tmr->receiver.callback)(tmr->head.u.arg);
- }
-
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ bif_timer_timeout(srv, (ErtsBifTimer *) tmr, roflgs);
+ else if (roflgs & ERTS_TMR_ROFLG_PROC)
+ hlt_proc_timeout(tmr);
+ else if (roflgs & ERTS_TMR_ROFLG_PORT)
+ hlt_port_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
+ (*tmr->head.receiver.callback)(tmr->head.u.arg);
}
tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- if ((roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- && tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1390,7 +1547,6 @@ hlt_service_timeout(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
vesdp,
tmr->timeout);
}
@@ -1402,19 +1558,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
ERTS_HLT_HDBG_CHK_SRV(srv);
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
-
- if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
- }
-
if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
/* Already removed... */
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1460,7 +1603,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
smlst->timeout);
}
@@ -1485,6 +1627,17 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
+ ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
+ if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, btm);
+ btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+ }
+#endif
+
if (roflgs & ERTS_TMR_ROFLG_HLT) {
hlt_delete_timer(esdp, &tmr->hlt);
hl_timer_dec_refc(&tmr->hlt, roflgs);
@@ -1495,7 +1648,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
}
}
-#ifdef ERTS_SMP
static void
init_canceled_queue(ErtsHLTCncldTmrQ *cq)
@@ -1625,7 +1777,7 @@ cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq)
cq->head.next.thr_progress_reached = 1;
/* Move unreferenced end pointer forward... */
- ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
cq->head.unref_end = cq->head.next.unref_end;
@@ -1718,31 +1870,24 @@ erts_handle_canceled_timers(void *vesdp,
need_more_work);
}
-#endif /* ERTS_SMP */
static void
queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr)
{
-#ifdef ERTS_SMP
ErtsHLTCncldTmrQ *cq;
cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue;
if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no))
erts_notify_canceled_timer(esdp, rsched_id);
-#else
- ERTS_INTERNAL_ERROR("Unexpected enqueue of canceled timer");
-#endif
}
static void
continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
{
-#ifdef ERTS_SMP
Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK);
if (esdp->no != sid)
queue_canceled_timer(esdp, sid, tmr);
else
-#endif
cleanup_sched_local_canceled_timer(esdp, tmr);
}
@@ -1750,57 +1895,86 @@ continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
* BIF timer specific
*/
+
Uint erts_bif_timer_memory_size(void)
{
return (Uint) 0;
}
static BIF_RETTYPE
-setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
- int short_time, Eterm rcvr, Eterm acsr,
- Eterm msg, int wrap)
+setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
+ int short_time, Eterm rcvr, Eterm msg, int wrap)
{
BIF_RETTYPE ret;
Eterm ref, tmo_msg, *hp;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
ErtsSchedulerData *esdp;
- DeclareTmpHeap(tmp_hp, 4, c_p);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *mbin;
+#endif
+ Eterm tmp_hp[4];
+ ErtsCreateTimerFunc create_timer;
if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
goto badarg;
esdp = erts_proc_sched_data(c_p);
- hp = HAlloc(c_p, REF_THING_SIZE);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin = erts_create_magic_indirection(bif_timer_ref_destructor);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ref = erts_mk_magic_ref(&hp, &c_p->off_heap, mbin);
+ ASSERT(erts_get_ref_numbers_thr_id(((ErtsMagicBinary *)mbin)->refn)
+ == (Uint32) esdp->no);
+#else
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
-
- ASSERT(erts_get_ref_numbers_thr_id(
- internal_ref_numbers(ref)) == (Uint32) esdp->no);
-
- UseTmpHeap(4, c_p);
+ ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
+ == (Uint32) esdp->no);
+#endif
tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
- tmr = create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_BIF, NULL, rcvr, acsr, tmo_msg,
- internal_ref_numbers(ref), NULL, NULL);
-
- UnUseTmpHeap(4, c_p);
+ create_timer = twheel ? create_tw_timer : create_hl_timer;
+ tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
+ short_time, ERTS_TMR_BIF,
+ NULL, rcvr, tmo_msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ (ErtsMagicBinary *) mbin,
+#else
+ internal_ordinary_ref_numbers(ref),
+#endif
+ NULL, NULL);
if (is_internal_pid(rcvr)) {
Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
rcvr, ERTS_PROC_LOCK_BTM,
ERTS_P2P_FLG_INC_REFC);
if (!proc) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#else
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- hl_timer_destroy(tmr);
+ if (twheel)
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ else
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ timer_destroy((ErtsTimer *) tmr, twheel, 1);
}
else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ proc_btm_list_insert(&proc->bif_timers, tmr);
+#else
proc_btm_rbt_insert(&proc->bif_timers, tmr);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- tmr->receiver.proc = proc;
+#endif
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ tmr->type.head.receiver.proc = proc;
}
}
@@ -1814,57 +1988,272 @@ badarg:
}
static int
-cancel_bif_timer(ErtsHLTimer *tmr)
+cancel_bif_timer(ErtsBifTimer *tmr)
{
erts_aint_t state;
Uint32 roflgs;
int res;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
if (state != ERTS_TMR_STATE_ACTIVE)
return 0;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
res = -1;
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
if (roflgs & ERTS_TMR_ROFLG_PROC) {
- Process *proc = tmr->receiver.proc;
- ERTS_HLT_ASSERT(!(tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+ Process *proc;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+
+ erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
/*
 * If the process is exiting, let it clean up
* the btm tree by itself (it may be in
* the middle of tree destruction).
*/
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!ERTS_PROC_IS_EXITING(proc) && tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ res = 1;
+ }
+#else
if (!ERTS_PROC_IS_EXITING(proc)
&& tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
proc_btm_rbt_delete(&proc->bif_timers, tmr);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
res = 1;
}
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+#endif
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
}
return res;
}
+static ERTS_INLINE Sint64
+access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
+{
+ int cncl_res;
+ Sint64 time_left;
+ ErtsMonotonicTime timeout;
+ int is_hlt;
+
+ if (!tmr)
+ return -1;
+
+ is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ timeout = (is_hlt
+ ? tmr->type.hlt.timeout
+ : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));
+
+ if (!cancel) {
+ erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ return get_time_left(esdp, timeout);
+ return -1;
+ }
+
+ cncl_res = cancel_bif_timer(tmr);
+ if (!cncl_res)
+ return -1;
+
+ time_left = get_time_left(esdp, timeout);
+
+ if (sid != (Uint32) esdp->no) {
+ if (cncl_res > 0)
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ else {
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (is_hlt) {
+ if (cncl_res > 0)
+ hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ }
+ else {
+ if (cncl_res > 0)
+ tw_timer_dec_refc(&tmr->type.twt);
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ }
+ }
+
+ return time_left;
+}
+
+static ERTS_INLINE Eterm
+return_info(Process *c_p, Sint64 time_left)
+{
+ Uint hsz;
+ Eterm *hp;
+
+ if (time_left < 0)
+ return am_false;
+
+ if (time_left <= (Sint64) MAX_SMALL)
+ return make_small((Sint) time_left);
+
+ hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ hp = HAlloc(c_p, hsz);
+ return erts_sint64_to_big(time_left, &hp);
+}
+
+static ERTS_INLINE Eterm
+send_async_info(Process *proc, ErtsProcLocks initial_locks,
+ Eterm tref, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm tag, res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ if (cancel)
+ tag = am_cancel_timer;
+ else
+ tag = am_read_timer;
+
+ ref = STORE_NC(&hp, ohp, tref);
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE3(hp, tag, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_proc_unlock(proc, locks);
+
+ return am_ok;
+}
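
Note how send_async_info() computes the reply's heap need exactly, before allocating: a 3-tuple costs four words (header plus three elements), the copied reference costs its own size, and the time-left value is free while it fits in an immediate small. A stand-alone sketch of that accounting; the small-value cutoff and the bignum cost below are assumptions, not the real ERTS constants:

    #include <stddef.h>
    #include <stdint.h>

    #define TOY_MAX_SMALL ((int64_t)1 << 59)   /* assumed immediate limit */

    static size_t toy_reply_heap_words(size_t ref_words, int64_t time_left)
    {
        size_t hsz = 4;                 /* 3-tuple: header + 3 elements */
        hsz += ref_words;               /* copied timer reference */
        if (time_left > TOY_MAX_SMALL)  /* value no longer fits a small */
            hsz += 2;                   /* bignum: header + one digit (assumed) */
        return hsz;
    }
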
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ Eterm res;
+ Sint64 time_left;
+
+ if (!is_internal_magic_ref(tref)) {
+ if (is_not_ref(tref)) {
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+ }
+ time_left = -1;
+ }
+ else {
+ ErtsMagicBinary *mbin;
+ mbin = (ErtsMagicBinary *) erts_magic_ref2bin(tref);
+ if (mbin->destructor != bif_timer_ref_destructor)
+ time_left = -1;
+ else {
+ ErtsBifTimer *tmr;
+ Uint32 sid;
+ tmr = magic_binary_to_btm(mbin);
+ sid = erts_get_ref_numbers_thr_id(internal_magic_ref_numbers(tref));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ time_left = access_btm(tmr, sid, erts_proc_sched_data(c_p), cancel);
+ }
+ }
+
+ if (!info)
+ res = am_ok;
+ else if (!async)
+ res = return_info(c_p, time_left);
+ else
+ res = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ return ret;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE Eterm
+send_sync_info(Process *proc, ErtsProcLocks initial_locks,
+ Uint32 *refn, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 3 + ERTS_REF_THING_SIZE;
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ ref = make_internal_ref(hp);
+ hp += ERTS_REF_THING_SIZE;
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE2(hp, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
static ERTS_INLINE Eterm
access_sched_local_btm(Process *c_p, Eterm pid,
- Eterm tref, Uint32 *trefn,
- Uint32 *rrefn,
- int async, int cancel,
- int return_res,
- int info)
+ Eterm tref, Uint32 *trefn,
+ Uint32 *rrefn,
+ int async, int cancel,
+ int return_res,
+ int info)
{
ErtsSchedulerData *esdp;
ErtsHLTimerService *srv;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
Process *proc;
ErtsProcLocks proc_locks;
@@ -1884,111 +2273,40 @@ access_sched_local_btm(Process *c_p, Eterm pid,
srv = esdp->timer_service;
tmr = btm_rbt_lookup(srv->btm_tree, trefn);
- if (tmr) {
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (cncl_res) {
-
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-
- hlt_delete_timer(esdp, tmr);
- }
- }
- }
+ time_left = access_btm(tmr, (Uint32) esdp->no, esdp, cancel);
if (!info)
- return am_ok;
-
- if (return_res) {
- ERTS_HLT_ASSERT(c_p);
- if (time_left < 0)
- return am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- return make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- return erts_sint64_to_big(time_left, &hp);
- }
- }
+ return am_ok;
if (c_p) {
- proc = c_p;
- proc_locks = ERTS_PROC_LOCK_MAIN;
+ proc = c_p;
+ proc_locks = ERTS_PROC_LOCK_MAIN;
}
else {
- proc = erts_proc_lookup(pid);
- proc_locks = 0;
+ proc = erts_proc_lookup(pid);
+ proc_locks = 0;
}
- if (proc) {
- Uint hsz;
- ErtsMessage *mp;
- Eterm *hp, msg, ref, result;
- ErlOffHeap *ohp;
- Uint32 *refn;
-#ifdef ERTS_HLT_DEBUG
- Eterm *hp_end;
-#endif
-
- hsz = REF_THING_SIZE;
- if (async) {
- refn = trefn; /* timer ref */
- hsz += 4; /* 3-tuple */
- }
- else {
- refn = rrefn; /* request ref */
- hsz += 3; /* 2-tuple */
- }
-
- ERTS_HLT_ASSERT(refn);
-
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(proc, &proc_locks,
- hsz, &hp, &ohp);
-
-#ifdef ERTS_HLT_DEBUG
- hp_end = hp + hsz;
-#endif
-
- if (time_left < 0)
- result = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- result = make_small((Sint) time_left);
- else
- result = erts_sint64_to_big(time_left, &hp);
-
- write_ref_thing(hp,
- refn[0],
- refn[1],
- refn[2]);
- ref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
-
- msg = (async
- ? TUPLE3(hp, (cancel
- ? am_cancel_timer
- : am_read_timer), ref, result)
- : TUPLE2(hp, ref, result));
-
- ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
-
- erts_queue_message(proc, proc_locks, mp, msg, am_clock_service);
-
- if (c_p)
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
+ if (!async) {
+ if (c_p)
+ return return_info(c_p, time_left);
+
+ if (proc)
+ return send_sync_info(proc, proc_locks,
+ rrefn, cancel, time_left);
+ }
+ else if (proc) {
+ Eterm ref;
+ Eterm heap[ERTS_REF_THING_SIZE];
+ if (is_value(tref))
+ ref = tref;
+ else {
+ write_ref_thing(&heap[0], trefn[0], trefn[1], trefn[2]);
+ ref = make_internal_ref(&heap[0]);
+ }
+ return send_async_info(proc, proc_locks,
+ ref, cancel, time_left);
}
return am_ok;
@@ -2021,108 +2339,64 @@ bif_timer_access_request(void *vreq)
static int
try_access_sched_remote_btm(ErtsSchedulerData *esdp,
Process *c_p, Uint32 sid,
- Uint32 *trefn,
+ Eterm tref, Uint32 *trefn,
int async, int cancel,
int info, Eterm *resp)
{
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
ERTS_HLT_ASSERT(c_p);
/*
* Check if the timer is aimed at current
- * process of if this process is an accessor
- * of the timer...
+ * process...
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (!tmr)
- tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn);
-#endif
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
if (!tmr)
return 0;
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- else
- time_left = -1;
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (!cncl_res)
- time_left = -1;
- else {
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
- }
- }
+ time_left = access_btm(tmr, sid, esdp, cancel);
- if (!info) {
+ if (!info)
*resp = am_ok;
- return 1;
- }
-
- if (!async) {
- if (time_left < 0)
- *resp = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- *resp = make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- *resp = erts_sint64_to_big(time_left, &hp);
- }
- }
- else {
- ErtsMessage *mp;
- Eterm tag, res, msg, tref;
- Uint hsz;
- Eterm *hp;
- ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
- ErlOffHeap *ohp;
-
- hsz = 4 + REF_THING_SIZE;
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(c_p, &proc_locks,
- hsz, &hp, &ohp);
- if (cancel)
- tag = am_cancel_timer;
- else
- tag = am_read_timer;
-
- write_ref_thing(hp,
- trefn[0],
- trefn[1],
- trefn[2]);
- tref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
-
- if (time_left < 0)
- res = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- res = make_small((Sint) time_left);
- else
- res = erts_sint64_to_big(time_left, &hp);
-
- msg = TUPLE3(hp, tag, tref, res);
+ else if (!async)
+ *resp = return_info(c_p, time_left);
+ else
+ *resp = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
- erts_queue_message(c_p, proc_locks, mp, msg, am_clock_service);
+ return 1;
+}
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(c_p, proc_locks);
+static Eterm
+no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ ErtsMessage *mp;
+ Uint hsz;
+ Eterm *hp, msg, ref, tag;
+ ErlOffHeap *ohp;
+ ErtsProcLocks locks;
- *resp = am_ok;
- }
- return 1;
+ if (!async)
+ return am_false;
+ if (!info)
+ return am_ok;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+ locks = ERTS_PROC_LOCK_MAIN;
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+ ref = STORE_NC(&hp, ohp, tref);
+ tag = cancel ? am_cancel_timer : am_read_timer;
+ msg = TUPLE3(hp, tag, ref, am_false);
+ erts_queue_message(c_p, locks, mp, msg, am_clock_service);
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks)
+ erts_proc_unlock(c_p, locks);
+ return am_ok;
}
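
no_timer_result() spells out what the caller sees when the timer is already gone; in the async+info case a {Tag, Ref, false} message is queued before ok is returned. The decision table, as a trivial model:

    /* Model of no_timer_result(): the BIF's return value when the timer
     * no longer exists. In the async+info case ERTS additionally sends
     * a {Tag, Ref, false} message before returning ok. */
    static const char *toy_no_timer(int async, int info)
    {
        if (!async) return "false"; /* synchronous callers get the result */
        if (!info)  return "ok";    /* async, caller asked for no info */
        return "ok";                /* async + info: 'ok' now, message later */
    }
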
static BIF_RETTYPE
@@ -2156,7 +2430,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
ERTS_BIF_PREP_RET(ret, res);
}
else if (try_access_sched_remote_btm(esdp, c_p,
- sid, trefn,
+ sid, tref, trefn,
async, cancel,
info, &res)) {
ERTS_BIF_PREP_RET(ret, res);
@@ -2189,7 +2463,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
Eterm *hp, rref;
Uint32 *rrefn;
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
rref = erts_sched_make_ref_in_buffer(esdp, hp);
rrefn = internal_ref_numbers(rref);
@@ -2197,28 +2471,21 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
req->rrefn[1] = rrefn[1];
req->rrefn[2] = rrefn[2];
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-
- if (ERTS_PROC_PENDING_EXIT(c_p))
- ERTS_VBUMP_ALL_REDS(c_p);
- else {
- /*
- * Caller needs to wait for a message containing
- * the ref that we just created. No such message
- * can exist in callers message queue at this time.
- * We therefore move the save pointer of the
- * callers message queue to the end of the queue.
- *
- * NOTE: It is of vital importance that the caller
- * immediately do a receive unconditionaly
- * waiting for the message with the reference;
- * otherwise, next receive will *not* work
- * as expected!
- */
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- c_p->msg.save = c_p->msg.last;
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ /*
+     * The caller needs to wait for a message containing
+     * the ref that we just created. No such message
+     * can exist in the caller's message queue at this time.
+     * We therefore move the save pointer of the
+     * caller's message queue to the end of the queue.
+     *
+     * NOTE: It is of vital importance that the caller
+     * immediately does an unconditional receive
+     * waiting for the message with the reference;
+     * otherwise, the next receive will *not* work
+ * as expected!
+ */
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref);
}
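
The hand-rolled save-pointer dance is gone; ERTS_RECV_MARK_SAVE/ERTS_RECV_MARK_SET implement the same idea: park the receive cursor at the queue tail so that the upcoming selective receive never rescans messages that predate the fresh reference. A toy singly linked queue model of the trick:

    typedef struct toy_msg { struct toy_msg *next; int ref; } toy_msg;
    typedef struct { toy_msg *first, *last, *save; } toy_queue;

    /* Park the cursor at the current tail: messages already queued can
     * never match the fresh ref, so the next receive skips them all. */
    static void toy_recv_mark(toy_queue *q) { q->save = q->last; }

    static toy_msg *toy_recv_ref(toy_queue *q, int ref)
    {
        toy_msg *m = q->save ? q->save->next : q->first;
        for (; m; m = m->next)
            if (m->ref == ref)
                return m;
        return 0;   /* not yet delivered: the caller would trap here */
    }
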
@@ -2235,11 +2502,11 @@ badarg:
return ret;
no_timer:
- ERTS_BIF_PREP_RET(ret, am_false);
- return ret;
-
+ return no_timer_result(c_p, tref, cancel, async, info);
}
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
static ERTS_INLINE int
bool_arg(Eterm val, int *argp)
{
@@ -2251,8 +2518,8 @@ bool_arg(Eterm val, int *argp)
}
static ERTS_INLINE int
-parse_bif_timer_options(Eterm option_list, int *async, int *info,
- int *abs, Eterm *accessor)
+parse_bif_timer_options(Eterm option_list, int *async,
+ int *info, int *abs)
{
Eterm list = option_list;
@@ -2262,8 +2529,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
*info = 1;
if (abs)
*abs = 0;
- if (accessor)
- *accessor = THE_NON_VALUE;
while (is_list(list)) {
Eterm *consp, *tp, opt;
@@ -2290,13 +2555,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
if (!abs || !bool_arg(tp[2], abs))
return 0;
break;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case am_accessor:
- if (!accessor || is_not_internal_pid(tp[2]))
- return 0;
- *accessor = tp[2];
- break;
-#endif
default:
return 0;
}
@@ -2310,42 +2568,57 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
}
static void
-exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
+exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
{
ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
Uint32 sid, roflgs;
erts_aint_t state;
+ int is_hlt;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
sid = roflgs & ERTS_TMR_ROFLG_SID_MASK;
+ is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);
- ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(tmr->btm.refn));
+ ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(tmr->btm.proc_list.next);
+#else
ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
!= ERTS_HLT_PFIELD_NOT_IN_TABLE);
-
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#endif
- if (sid == (Uint32) esdp->no) {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- }
- hl_timer_dec_refc(tmr, roflgs);
- }
- else {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ if (sid != (Uint32) esdp->no) {
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ return;
+ }
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
- else
- hl_timer_dec_refc(tmr, roflgs);
+#endif
+ if (is_hlt)
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ else
+ cancel_tw_timer(esdp, &tmr->type.twt);
}
+ if (is_hlt)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
}
#ifdef ERTS_HLT_DEBUG
@@ -2354,20 +2627,29 @@ exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
# define ERTS_BTM_MAX_DESTROY_LIMIT 50
#endif
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
typedef struct {
ErtsBifTimers *bif_timers;
union {
proc_btm_rbt_yield_state_t proc_btm_yield_state;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- abtm_rbt_yield_state_t abtm_yield_state;
-#endif
} u;
} ErtsBifTimerYieldState;
+#endif
-int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+ return proc_btm_list_foreach_destroy_yielding(btm,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+ ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2399,63 +2681,18 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
}
return res;
-}
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-static void
-detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
-{
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-}
-
-int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
-{
- ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
- ErtsBifTimerYieldState *ysp;
- int res;
-
- ysp = (ErtsBifTimerYieldState *) *vyspp;
- if (!ysp)
- ysp = &ys;
-
- res = abtm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
- detach_bif_timer,
- (void *) esdp,
- &ysp->u.abtm_yield_state,
- ERTS_BTM_MAX_DESTROY_LIMIT);
-
- if (res == 0) {
- if (ysp != &ys)
- erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
- *vyspp = NULL;
- }
- else {
-
- if (ysp == &ys) {
- ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
- sizeof(ErtsBifTimerYieldState));
- sys_memcpy((void *) ysp, (void *) &ys,
- sizeof(ErtsBifTimerYieldState));
- }
-
- *vyspp = (void *) ysp;
- }
- return res;
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
}
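
Both build variants keep the yielding discipline: at most ERTS_BTM_MAX_DESTROY_LIMIT (50) timers are torn down per invocation, and a nonzero result asks the scheduler to call back with the saved state. A minimal model of such a yielding destroy loop over a singly linked list:

    #define TOY_DESTROY_LIMIT 50

    typedef struct toy_node { struct toy_node *next; } toy_node;

    /* Cancel up to TOY_DESTROY_LIMIT entries; return 1 if work remains
     * so the caller can be rescheduled instead of hogging the scheduler. */
    static int toy_destroy_yielding(toy_node **cursor)
    {
        int n = 0;
        while (*cursor && n++ < TOY_DESTROY_LIMIT) {
            toy_node *victim = *cursor;
            *cursor = victim->next;
            /* cancel + free victim here */
        }
        return *cursor != 0;
    }
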
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
-
static ERTS_INLINE int
parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ErtsMonotonicTime *conv_arg, int abs,
- ErtsMonotonicTime *tposp, int *stimep)
+ ErtsMonotonicTime *tposp, int *stimep,
+ ErtsMonotonicTime *msp)
{
- ErtsMonotonicTime t;
-
+ ErtsMonotonicTime t, now;
+
if (!term_to_Sint64(arg, &t)) {
ERTS_HLT_ASSERT(!is_small(arg));
if (!is_big(arg))
@@ -2470,22 +2707,30 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
if (conv_arg)
*conv_arg = t;
+ now = erts_get_monotonic_time(esdp);
+
if (abs) {
t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
return 1;
if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
return 1;
+ if (msp)
+ *msp = t - ERTS_MONOTONIC_TO_MSEC(now);
+
*stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
< ERTS_BIF_TIMER_SHORT_TIME);
*tposp = ERTS_MSEC_TO_CLKTCKS(t);
}
else {
- ErtsMonotonicTime now, ticks;
+ ErtsMonotonicTime ticks;
if (t < 0)
return -1;
+ if (msp)
+ *msp = t;
+
ticks = ERTS_MSEC_TO_CLKTCKS(t);
if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
@@ -2493,7 +2738,6 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ERTS_HLT_ASSERT(ticks >= 0);
- now = erts_get_monotonic_time(esdp);
ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
ticks += 1;
@@ -2516,66 +2760,68 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
BIF_RETTYPE send_after_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
- tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1,
+ NULL, 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
BIF_RETTYPE send_after_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
BIF_RETTYPE start_timer_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
BIF_RETTYPE start_timer_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
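
All four BIFs now thread the raw millisecond timeout (tmo) out of parse_timeout_pos() for one purpose: choosing the backing store. Timeouts below ERTS_TIMER_WHEEL_MSEC go on the per-scheduler timer wheel, longer ones into the high-level timer structure. As a sketch, with an assumed cutoff constant:

    typedef enum { TOY_WHEEL, TOY_HIGH_LEVEL } toy_tier;

    #define TOY_WHEEL_MSEC 65536   /* assumed cutoff; ERTS derives the real
                                    * value from wheel size and resolution */

    static toy_tier toy_pick_tier(long long tmo_ms)
    {
        return tmo_ms < TOY_WHEEL_MSEC ? TOY_WHEEL : TOY_HIGH_LEVEL;
    }
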
BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
@@ -2588,7 +2834,7 @@ BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async, info;
- if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2605,7 +2851,7 @@ BIF_RETTYPE read_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async;
- if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2620,14 +2866,13 @@ start_callback_timer(ErtsSchedulerData *esdp,
void *arg)
{
- if (twt)
- create_tw_timer(esdp, ERTS_TMR_CALLBACK, NULL,
- callback, arg, timeout_pos);
- else
- create_hl_timer(esdp, timeout_pos, 0,
- ERTS_TMR_CALLBACK, NULL,
- NIL, THE_NON_VALUE, NIL,
- NULL, callback, arg);
+ ErtsCreateTimerFunc create_timer = (twt
+ ? create_tw_timer
+ : create_hl_timer);
+ (void) create_timer(esdp, timeout_pos, 0,
+ ERTS_TMR_CALLBACK, NULL,
+ NIL, THE_NON_VALUE, NULL,
+ callback, arg);
}
typedef struct {
@@ -2666,7 +2911,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
@@ -2704,19 +2949,19 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
if (tmo == 0)
c_p->flags |= F_TIMO;
else {
+ ErtsCreateTimerFunc create_timer;
c_p->flags |= F_INSLPQUEUE;
c_p->flags &= ~F_TIMO;
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PROC, (void *) c_p,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_PROC, (void *) c_p,
- c_p->common.id, THE_NON_VALUE,
- NIL, NULL, NULL, NULL);
- erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_PROC, (void *) c_p,
+ c_p->common.id, THE_NON_VALUE,
+ NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
}
}
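
The same refactor recurs here and in erts_set_port_timer() below: since create_tw_timer() and create_hl_timer() now share the ErtsCreateTimerFunc signature, the duplicated if/else call sites collapse into one indirect call. The shape of the pattern, with a deliberately simplified creator signature (the real one carries many more parameters):

    typedef void *(*toy_create_fn)(long long timeout_pos);

    static void *toy_create_wheel(long long tpos) { (void)tpos; return 0; }
    static void *toy_create_high(long long tpos)  { (void)tpos; return 0; }

    static void *toy_create_timer(long long tmo_ms, long long tpos)
    {
        /* pick the creator once, then share the single call site */
        toy_create_fn create = (tmo_ms < 65536 /* assumed cutoff */
                                ? toy_create_wheel
                                : toy_create_high);
        return create(tpos);
    }
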
@@ -2727,11 +2972,11 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
ErtsMonotonicTime tmo, timeout_pos;
int short_time, tres;
- ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+ ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
== ERTS_PTMR_NONE);
tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
- &timeout_pos, &short_time);
+ &timeout_pos, &short_time, NULL);
if (tres != 0)
return tres;
@@ -2747,7 +2992,7 @@ erts_set_proc_timer_uword(Process *c_p, UWord tmo)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
- ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+ ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
== ERTS_PTMR_NONE);
#ifndef ARCH_32
@@ -2770,13 +3015,13 @@ void
erts_cancel_proc_timer(Process *c_p)
{
erts_aint_t tval;
- tval = erts_smp_atomic_xchg_acqb(&c_p->common.timer,
+ tval = erts_atomic_xchg_acqb(&c_p->common.timer,
ERTS_PTMR_NONE);
c_p->flags &= ~(F_INSLPQUEUE|F_TIMO);
if (tval == ERTS_PTMR_NONE)
return;
if (tval == ERTS_PTMR_TIMEDOUT) {
- erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
+ erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
return;
}
continue_cancel_ptimer(erts_proc_sched_data(c_p),
@@ -2789,29 +3034,37 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
void *tmr;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsMonotonicTime timeout_pos;
+ ErtsCreateTimerFunc create_timer;
- if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
+ if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
erts_cancel_port_timer(c_prt);
check_canceled_queue(esdp, esdp->timer_service);
- timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+ if (tmo == 0) {
+ erts_atomic_set_relb(&c_prt->common.timer, ERTS_PTMR_TIMEDOUT);
+ erts_port_task_schedule(c_prt->common.id,
+ &c_prt->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
+ } else {
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PORT, (void *) c_prt,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
- (void *) c_prt, c_prt->common.id,
- THE_NON_VALUE, NIL, NULL, NULL, NULL);
- erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
+ erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+ }
}
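
erts_set_port_timer() also gains a zero-timeout fast path: no timer object is created at all; the port is marked timed-out and its timeout task is scheduled on the spot. The control flow, with hypothetical helpers standing in for the port-task machinery:

    static void toy_schedule_timeout_task(void) { /* hypothetical */ }
    static void toy_start_timer(long long tmo_ms) { (void)tmo_ms; }

    static void toy_set_port_timer(long long tmo_ms)
    {
        if (tmo_ms == 0) {
            /* fire now: skip timer allocation entirely */
            toy_schedule_timeout_task();
            return;
        }
        toy_start_timer(tmo_ms);   /* wheel or high-level, as usual */
    }
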
void
erts_cancel_port_timer(Port *c_prt)
{
erts_aint_t tval;
- tval = erts_smp_atomic_xchg_acqb(&c_prt->common.timer,
+ tval = erts_atomic_xchg_acqb(&c_prt->common.timer,
ERTS_PTMR_NONE);
if (tval == ERTS_PTMR_NONE)
return;
@@ -2819,7 +3072,7 @@ erts_cancel_port_timer(Port *c_prt)
while (!erts_port_task_is_scheduled(&c_prt->timeout_task))
erts_thr_yield();
erts_port_task_abort(&c_prt->timeout_task);
- erts_smp_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
+ erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
return;
}
continue_cancel_ptimer(erts_get_scheduler_data(),
@@ -2833,7 +3086,7 @@ erts_read_port_timer(Port *c_prt)
erts_aint_t itmr;
ErtsMonotonicTime timeout_pos;
- itmr = erts_smp_atomic_read_acqb(&c_prt->common.timer);
+ itmr = erts_atomic_read_acqb(&c_prt->common.timer);
if (itmr == ERTS_PTMR_NONE)
return (Sint64) -1;
if (itmr == ERTS_PTMR_TIMEDOUT)
@@ -2842,7 +3095,7 @@ erts_read_port_timer(Port *c_prt)
if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
timeout_pos = tmr->hlt.timeout;
else
- timeout_pos = tmr->twt.tw_tmr.timeout_pos;
+ timeout_pos = erts_tweel_read_timeout(&tmr->twt.u.tw_tmr);
return get_time_left(NULL, timeout_pos);
}
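
erts_read_port_timer() exposes the three states of the common.timer word: -1 when no timer is set, 0 once it has already fired (the early returns above, partly in elided lines), and otherwise the remaining time. A compact model of that contract; the sentinel encodings below are assumed, not the real ERTS_PTMR_* values:

    #include <stdint.h>

    #define TOY_PTMR_NONE     0   /* assumed sentinel encodings */
    #define TOY_PTMR_TIMEDOUT 1

    static int64_t toy_read_timer(intptr_t word, int64_t time_left)
    {
        if (word == TOY_PTMR_NONE)     return -1;  /* no timer set */
        if (word == TOY_PTMR_TIMEDOUT) return 0;   /* already fired */
        return time_left;                          /* live timer */
    }
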
@@ -2851,25 +3104,41 @@ erts_read_port_timer(Port *c_prt)
*/
typedef struct {
- int to;
+ fmtfn_t to;
void *to_arg;
ErtsMonotonicTime now;
} ErtsBTMPrint;
static void
-btm_print(ErtsHLTimer *tmr, void *vbtmp)
+btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
{
ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
ErtsMonotonicTime left;
Eterm receiver;
- if (tmr->timeout <= btmp->now)
- left = 0;
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
- receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id);
+ if (is_hlt) {
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ if (tmr->type.hlt.timeout <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->type.hlt.timeout - btmp->now);
+ }
+ else {
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT));
+ if (tpos <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tpos - btmp->now);
+ }
+
+ receiver = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
erts_print(btmp->to, btmp->to_arg,
"=timer:%T\n"
@@ -2880,8 +3149,38 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
(Sint64) left);
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_btm_print(ErtsHLTimer *tmr, void *vbtmp)
+{
+ btm_print((ErtsBifTimer *) tmr, vbtmp, 0, 1);
+}
+
+static void
+twt_btm_print(void *vbtmp, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ btm_print((ErtsBifTimer *) vtwtp, vbtmp, tpos, 0);
+}
+
+#else
+
+static void
+btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
+{
+ int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ ErtsMonotonicTime tpos;
+ if (is_hlt)
+ tpos = 0;
+ else
+ tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
+ btm_print(tmr, vbtmp, tpos, is_hlt);
+}
+
+#endif
+
void
-erts_print_bif_timer_info(int to, void *to_arg)
+erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
{
ErtsBTMPrint btmp;
int six;
@@ -2897,7 +3196,15 @@ erts_print_bif_timer_info(int to, void *to_arg)
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
- btm_rbt_foreach(srv->btm_tree, btm_print, (void *) &btmp);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_btm_print, (void *) &btmp);
+ time_rbt_foreach(srv->time_tree, hlt_btm_print, (void *) &btmp);
+#else
+ btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
+#endif
}
}
@@ -2910,19 +3217,37 @@ typedef struct {
} ErtsBTMForeachDebug;
static void
-debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
{
- if (erts_smp_atomic32_read_nob(&tmr->state) == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
+ if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
- (*btmfd->func)(((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id),
- tmr->btm.message,
- tmr->btm.bp,
- btmfd->arg);
+ Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
+ (*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
}
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+{
+ debug_btm_foreach((ErtsBifTimer *) tmr, vbtmfd);
+}
+
+static void
+twt_debug_btm_foreach(void *vbtmfd, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ debug_btm_foreach((ErtsBifTimer *) vtwtp, vbtmfd);
+}
+
+#endif
+
void
erts_debug_bif_timer_foreach(void (*func)(Eterm,
Eterm,
@@ -2936,15 +3261,26 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
btmfd.func = func;
btmfd.arg = arg;
- if (!erts_smp_thr_progress_is_blocking())
+ if (!erts_thr_progress_is_blocking())
ERTS_INTERNAL_ERROR("Not blocking thread progress");
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_debug_btm_foreach,
+ (void *) &btmfd);
+ time_rbt_foreach(srv->time_tree,
+ hlt_debug_btm_foreach,
+ (void *) &btmfd);
+#else
btm_rbt_foreach(srv->btm_tree,
debug_btm_foreach,
(void *) &btmfd);
+#endif
}
}
@@ -2963,7 +3299,7 @@ debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
= (ErtsDebugForeachCallbackTimer *) vdfct;
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2981,7 +3317,7 @@ debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
vdfct);
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2996,7 +3332,7 @@ debug_tw_callback_timer(void *vdfct,
ErtsDebugForeachCallbackTimer *dfct
= (ErtsDebugForeachCallbackTimer *) vdfct;
- if (twtp->u.callback == dfct->tclbk)
+ if (twtp->head.receiver.callback == dfct->tclbk)
(*dfct->func)(dfct->arg,
timeout_pos,
twtp->head.u.arg);
@@ -3016,7 +3352,7 @@ erts_debug_callback_timer_foreach(void (*tclbk)(void *),
dfct.func = func;
dfct.arg = arg;
- if (!erts_smp_thr_progress_is_blocking())
+ if (!erts_thr_progress_is_blocking())
ERTS_INTERNAL_ERROR("Not blocking thread progress");
for (six = 0; six < erts_no_schedulers; six++) {
@@ -3067,7 +3403,9 @@ st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
}
ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
}
static void
@@ -3096,8 +3434,10 @@ tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
& ~ERTS_HLT_PFLGS_MASK);
ERTS_HLT_ASSERT(tmr == prnt);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
if (tmr->time.tree.same_time) {
ErtsHdbgHLT st_hdbg;
st_hdbg.srv = hdbg->srv;
@@ -3163,6 +3503,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (srv->btm_tree) {
ErtsHdbgHLT hdbg;
hdbg.srv = srv;
@@ -3171,6 +3512,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#endif
}
#endif /* ERTS_HLT_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index 0931bb8965..e6f5e8b67d 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
#ifndef ERL_HL_TIMER_H__
#define ERL_HL_TIMER_H__
-typedef struct ErtsHLTimer_ ErtsBifTimers;
+typedef struct ErtsBifTimer_ ErtsBifTimers;
typedef struct ErtsHLTimerService_ ErtsHLTimerService;
#include "sys.h"
@@ -36,16 +36,16 @@ typedef struct ErtsHLTimerService_ ErtsHLTimerService;
#define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1))
#define ERTS_PTMR_INIT(P) \
- erts_smp_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
+ erts_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
#define ERTS_PTMR_IS_SET(P) \
- (ERTS_PTMR_NONE != erts_smp_atomic_read_nob(&(P)->common.timer))
+ (ERTS_PTMR_NONE != erts_atomic_read_nob(&(P)->common.timer))
#define ERTS_PTMR_IS_TIMED_OUT(P) \
- (ERTS_PTMR_TIMEDOUT == erts_smp_atomic_read_nob(&(P)->common.timer))
+ (ERTS_PTMR_TIMEDOUT == erts_atomic_read_nob(&(P)->common.timer))
#define ERTS_PTMR_CLEAR(P) \
do { \
ASSERT(ERTS_PTMR_IS_TIMED_OUT((P))); \
- erts_smp_atomic_set_nob(&(P)->common.timer, \
+ erts_atomic_set_nob(&(P)->common.timer, \
ERTS_PTMR_NONE); \
} while (0)
@@ -56,23 +56,21 @@ void erts_cancel_proc_timer(Process *);
void erts_set_port_timer(Port *, Sint64);
void erts_cancel_port_timer(Port *);
Sint64 erts_read_port_timer(Port *);
-int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **);
int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
ErtsHLTimerService *erts_create_timer_service(void);
void erts_hl_timer_init(void);
void erts_start_timer_callback(ErtsMonotonicTime,
void (*)(void *),
void *);
-#ifdef ERTS_SMP
void
erts_handle_canceled_timers(void *vesdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *need_more_work);
-#endif
Uint erts_bif_timer_memory_size(void);
-void erts_print_bif_timer_info(int to, void *to_arg);
+void erts_print_bif_timer_info(fmtfn_t to, void *to_arg);
void erts_debug_bif_timer_foreach(void (*func)(Eterm,
Eterm,
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 781bf024dd..99e788c718 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,7 +37,7 @@
#include "erl_mseg.h"
#include "erl_threads.h"
#include "erl_hl_timer.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "erl_printf_term.h"
#include "erl_misc_utils.h"
#include "packet_parser.h"
@@ -49,6 +49,9 @@
#include "erl_bif_unique.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_check_io.h"
+#include "erl_osenv.h"
+#include "erl_proc_sig_queue.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -59,29 +62,27 @@
# include <sys/resource.h>
#endif
-#define ERTS_DEFAULT_NO_ASYNC_THREADS 10
+#define ERTS_DEFAULT_NO_ASYNC_THREADS 1
+
+#define ERTS_DEFAULT_SCHED_STACK_SIZE 128
+#define ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE 40
+#define ERTS_DEFAULT_DIO_SCHED_STACK_SIZE 40
/*
* The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
* only. Do not remove even though they aren't used elsewhere in the emulator!
*/
-#ifdef ERTS_SMP
const int etp_smp_compiled = 1;
-#else
-const int etp_smp_compiled = 0;
-#endif
-#ifdef USE_THREADS
const int etp_thread_compiled = 1;
-#else
-const int etp_thread_compiled = 0;
-#endif
const char etp_erts_version[] = ERLANG_VERSION;
const char etp_otp_release[] = ERLANG_OTP_RELEASE;
const char etp_compile_date[] = ERLANG_COMPILE_DATE;
const char etp_arch[] = ERLANG_ARCHITECTURE;
#ifdef ERTS_ENABLE_KERNEL_POLL
+const int erts_use_kernel_poll = 1;
const int etp_kernel_poll_support = 1;
#else
+const int erts_use_kernel_poll = 0;
const int etp_kernel_poll_support = 0;
#endif
#if defined(ARCH_64)
@@ -111,12 +112,23 @@ const int etp_lock_check = 1;
#else
const int etp_lock_check = 0;
#endif
-#ifdef WORDS_BIGENDIAN
-const int etp_big_endian = 1;
+const int etp_endianness = ERTS_ENDIANNESS;
+const Eterm etp_ref_header = ERTS_REF_THING_HEADER;
+#ifdef ERTS_MAGIC_REF_THING_HEADER
+const Eterm etp_magic_ref_header = ERTS_MAGIC_REF_THING_HEADER;
#else
-const int etp_big_endian = 0;
+const Eterm etp_magic_ref_header = ERTS_REF_THING_HEADER;
#endif
const Eterm etp_the_non_value = THE_NON_VALUE;
+#ifdef ERTS_HOLE_MARKER
+const Eterm etp_hole_marker = ERTS_HOLE_MARKER;
+#else
+const Eterm etp_hole_marker = 0;
+#endif
+
+static int modified_sched_thread_suggested_stack_size = 0;
+
+Eterm erts_init_process_id;
/*
* Note about VxWorks: All variables must be initialized by executable code,
@@ -143,20 +155,10 @@ static void erl_init(int ncpu,
static erts_atomic_t exiting;
-#ifdef ERTS_SMP
-erts_smp_atomic32_t erts_writing_erl_crash_dump;
+erts_atomic32_t erts_writing_erl_crash_dump;
erts_tsd_key_t erts_is_crash_dumping_key;
-#else
-volatile int erts_writing_erl_crash_dump = 0;
-#endif
int erts_initialized = 0;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-erts_tid_t erts_main_thread;
-#endif
-
-int erts_use_sender_punish;
-
/*
* Configurable parameters.
*/
@@ -172,7 +174,7 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
-erts_smp_atomic32_t erts_max_gen_gcs;
+erts_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
am_info or am_warning, am_error is
@@ -182,11 +184,9 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
static int no_dirty_cpu_schedulers;
static int no_dirty_cpu_schedulers_online;
static int no_dirty_io_schedulers;
-#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -207,16 +207,16 @@ int erts_no_line_info = 0; /* -L: Don't load line information */
*/
ErtsModifiedTimings erts_modified_timings[] = {
- /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
- /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4, 2*INPUT_REDUCTIONS},
- /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
- /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8, 3*INPUT_REDUCTIONS},
- /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
- /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11, INPUT_REDUCTIONS/2},
- /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
- /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7, INPUT_REDUCTIONS/3},
- /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
- /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7, INPUT_REDUCTIONS/4}
+ /* 0 */ {make_small(0), CONTEXT_REDS},
+ /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4},
+ /* 2 */ {make_small(0), CONTEXT_REDS/2},
+ /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8},
+ /* 4 */ {make_small(0), CONTEXT_REDS/3},
+ /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11},
+ /* 6 */ {make_small(1), CONTEXT_REDS/4},
+ /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7},
+ /* 8 */ {make_small(10), CONTEXT_REDS/5},
+ /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7}
};
#define ERTS_MODIFIED_TIMING_LEVELS \
@@ -242,7 +242,7 @@ progname(char *fullname)
{
int i;
- i = strlen(fullname);
+ i = sys_strlen(fullname);
while (i >= 0) {
if ((fullname[i] != '/') && (fullname[i] != '\\'))
i--;
@@ -296,30 +296,6 @@ void erl_error(char *fmt, va_list args)
static int early_init(int *argc, char **argv);
-void
-erts_short_init(void)
-{
-
- int ncpu;
- int time_correction;
- ErtsTimeWarpMode time_warp_mode;
-
- set_default_time_adj(&time_correction,
- &time_warp_mode);
- ncpu = early_init(NULL, NULL);
- erl_init(ncpu,
- ERTS_DEFAULT_MAX_PROCESSES,
- 0,
- ERTS_DEFAULT_MAX_PORTS,
- 0,
- 0,
- time_correction,
- time_warp_mode,
- ERTS_NODE_TAB_DELAY_GC_DEFAULT,
- ERTS_DB_SPNCNT_NORMAL);
- erts_initialized = 1;
-}
-
static void
erl_init(int ncpu,
int proc_tab_sz,
@@ -332,18 +308,18 @@ erl_init(int ncpu,
int node_tab_delete_delay,
ErtsDbSpinCount db_spin_count)
{
+ erts_monitor_link_init();
+ erts_proc_sig_queue_init();
erts_bif_unique_init();
- erts_init_monitors();
erts_init_time(time_correction, time_warp_mode);
erts_init_sys_common_misc();
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
- no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
- , no_dirty_cpu_schedulers,
+ no_schedulers_online,
+ erts_no_poll_threads,
+ no_dirty_cpu_schedulers,
no_dirty_cpu_schedulers_online,
no_dirty_io_schedulers
-#endif
);
erts_late_init_time_sup();
erts_init_cpu_topology(); /* Must be after init_scheduling */
@@ -378,6 +354,7 @@ erl_init(int ncpu,
erts_init_bif();
erts_init_bif_chksum();
erts_init_bif_binary();
+ erts_init_bif_persistent_term();
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
@@ -421,7 +398,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
*/
erts_init_empty_process(&parent);
- erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
hp = HAlloc(&parent, argc*2 + 4);
args = NIL;
for (i = argc-1; i >= 0; i--) {
@@ -436,13 +413,13 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
res = erl_create_process(&parent, start_mod, am_start, args, &so);
- erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
erts_cleanup_empty_process(&parent);
return res;
}
static Eterm
-erl_system_process_otp(Eterm parent_pid, char* modname)
+erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq, int prio)
{
Eterm start_mod;
Process* parent;
@@ -457,9 +434,18 @@ erl_system_process_otp(Eterm parent_pid, char* modname)
parent = erts_pid2proc(NULL, 0, parent_pid, ERTS_PROC_LOCK_MAIN);
- so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
+ so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC|SPO_USE_ARGS;
+ if (off_heap_msgq)
+ so.flags |= SPO_OFF_HEAP_MSGQ;
+ so.min_heap_size = H_MIN_SIZE;
+ so.min_vheap_size = BIN_VH_MIN_SIZE;
+ so.max_heap_size = H_MAX_SIZE;
+ so.max_heap_flags = H_MAX_FLAGS;
+ so.priority = prio;
+ so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
+ so.scheduler = 0;
res = erl_create_process(parent, start_mod, am_start, NIL, &so);
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
return res;
}
@@ -586,9 +572,19 @@ void erts_usage(void)
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
+    erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O.\n");
+    erts_fprintf(stderr, "               This value has to be equal to or smaller than the\n");
+    erts_fprintf(stderr, "               number of poll threads. If the current platform\n");
+    erts_fprintf(stderr, "               does not support concurrent update of pollsets\n");
+    erts_fprintf(stderr, "               this value is ignored.\n");
+    erts_fprintf(stderr, "-IOt number set number of threads to be used to poll for I/O\n");
+    erts_fprintf(stderr, "-IOPp number set number of pollsets as a percentage of the\n");
+    erts_fprintf(stderr, "               number of poll threads.\n");
+    erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n");
+    erts_fprintf(stderr, "               as a percentage of the number of schedulers.\n");
+
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
- erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
@@ -610,6 +606,10 @@ void erts_usage(void)
erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n");
erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
+ erts_fprintf(stderr, "-sbwtdcpu val set dirty CPU scheduler busy wait threshold, valid values are:\n");
+ erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
+ erts_fprintf(stderr, "-sbwtdio val set dirty IO scheduler busy wait threshold, valid values are:\n");
+ erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n");
erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
@@ -628,10 +628,25 @@ void erts_usage(void)
erts_fprintf(stderr, " very_lazy|lazy|medium|eager|very_eager.\n");
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
+ erts_fprintf(stderr, "-swtdcpu val set dirty CPU scheduler wakeup threshold, valid values are:\n");
+ erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
+ erts_fprintf(stderr, "-swtdio val set dirty IO scheduler wakeup threshold, valid values are:\n");
+ erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, " valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
- ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdio size suggested stack size in kilo words for dirty IO scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DIO_SCHED_STACK_SIZE);
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
@@ -640,7 +655,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
erts_fprintf(stderr, " processors available, respectively\n");
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n");
erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n");
erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n",
@@ -650,7 +664,6 @@ void erts_usage(void)
erts_fprintf(stderr, " and logical processors available, respectively\n");
erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n",
ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
-#endif
erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE);
@@ -678,7 +691,6 @@ void erts_usage(void)
erts_exit(1, "");
}
-#ifdef USE_THREADS
/*
* allocators for thread lib
*/
@@ -720,7 +732,6 @@ static void ethr_ll_free(void *ptr)
erts_free(ERTS_ALC_T_ETHR_LL, ptr);
}
-#endif
static int
early_init(int *argc, char **argv) /*
@@ -738,22 +749,16 @@ early_init(int *argc, char **argv) /*
int schdlrs_percentage = 100;
int schdlrs_onln_percentage = 100;
int max_main_threads;
-#ifdef ERTS_DIRTY_SCHEDULERS
int dirty_cpu_scheds;
int dirty_cpu_scheds_online;
int dirty_cpu_scheds_pctg = 100;
int dirty_cpu_scheds_onln_pctg = 100;
int dirty_io_scheds;
-#endif
int max_reader_groups;
int reader_groups;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- erts_main_thread = erts_thr_self();
-#endif
-
erts_save_emu_args(*argc, argv);
erts_sched_compact_load = 1;
@@ -767,19 +772,14 @@ early_init(int *argc, char **argv) /*
H_MAX_SIZE = H_DEFAULT_MAX_SIZE;
H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG;
- erts_initialized = 0;
+ erts_term_init();
- erts_use_sender_punish = 1;
+ erts_initialized = 0;
erts_pre_early_init_cpu_topology(&max_reader_groups,
&ncpu,
&ncpuonln,
&ncpuavail);
-#ifndef ERTS_SMP
- ncpu = 1;
- ncpuonln = 1;
- ncpuavail = 1;
-#endif
ignore_break = 0;
replace_intr = 0;
@@ -791,21 +791,12 @@ early_init(int *argc, char **argv) /*
erts_sys_pre_init();
erts_atomic_init_nob(&exiting, 0);
-#ifdef ERTS_SMP
erts_thr_progress_pre_init();
-#endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init();
-#endif
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
+ erts_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
-#else
- erts_writing_erl_crash_dump = 0;
-#endif
- erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
+ erts_atomic32_init_nob(&erts_max_gen_gcs,
(erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
@@ -822,16 +813,15 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
dirty_cpu_scheds = no_schedulers;
dirty_cpu_scheds_online = no_schedulers_online;
dirty_io_scheds = 10;
-#endif
envbufsz = sizeof(envbuf);
- /* erts_sys_getenv(_raw)() not initialized yet; need erts_sys_getenv__() */
- if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
+ /* erts_osenv hasn't been initialized yet, so we need to fall back to
+ * erts_sys_explicit_host_getenv() */
+ if (erts_sys_explicit_host_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 1)
erts_async_max_threads = atoi(envbuf);
else
erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS;
@@ -841,7 +831,7 @@ early_init(int *argc, char **argv) /*
if (argc && argv) {
int i = 1;
while (i < *argc) {
- if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
+ if (sys_strcmp(argv[i], "--") == 0) { /* end of emulator options */
i++;
break;
}
@@ -879,6 +869,7 @@ early_init(int *argc, char **argv) /*
}
break;
}
+
case 'S' :
if (argv[i][2] == 'P') {
int ptot, ponln;
@@ -919,11 +910,10 @@ early_init(int *argc, char **argv) /*
("using %d:%d scheduler percentages\n",
schdlrs_percentage, schdlrs_onln_percentage));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (argv[i][2] == 'D') {
char *arg;
char *type = argv[i]+3;
- if (strncmp(type, "Pcpu", 4) == 0) {
+ if (sys_strncmp(type, "Pcpu", 4) == 0) {
int ptot, ponln;
arg = get_arg(argv[i]+7, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &ptot, &ponln)) {
@@ -960,7 +950,7 @@ early_init(int *argc, char **argv) /*
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler percentages\n",
dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg));
- } else if (strncmp(type, "cpu", 3) == 0) {
+ } else if (sys_strncmp(type, "cpu", 3) == 0) {
int tot, onln;
arg = get_arg(argv[i]+6, argv[i+1], &i);
switch (sscanf(arg, "%d:%d", &tot, &onln)) {
@@ -1011,7 +1001,7 @@ early_init(int *argc, char **argv) /*
}
VERBOSE(DEBUG_SYSTEM,
("using %d:%d dirty CPU scheduler(s)\n", tot, onln));
- } else if (strncmp(type, "io", 2) == 0) {
+ } else if (sys_strncmp(type, "io", 2) == 0) {
arg = get_arg(argv[i]+5, argv[i+1], &i);
dirty_io_scheds = atoi(arg);
if (dirty_io_scheds < 0 ||
@@ -1031,7 +1021,6 @@ early_init(int *argc, char **argv) /*
break;
}
}
-#endif
else {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1090,7 +1079,6 @@ early_init(int *argc, char **argv) /*
i++;
}
-#ifdef ERTS_SMP
/* apply any scheduler percentages */
if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) {
schdlrs = schdlrs * schdlrs_percentage / 100;
@@ -1114,12 +1102,6 @@ early_init(int *argc, char **argv) /*
erts_usage();
}
}
-#else
- /* Silence gcc warnings */
- (void)schdlrs_percentage;
- (void)schdlrs_onln_percentage;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
    /* apply any dirty scheduler percentages */
if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) {
dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100;
@@ -1127,33 +1109,31 @@ early_init(int *argc, char **argv) /*
}
if (dirty_cpu_scheds > schdlrs)
dirty_cpu_scheds = schdlrs;
+ if (dirty_cpu_scheds < 1)
+ dirty_cpu_scheds = 1;
if (dirty_cpu_scheds_online > schdlrs_onln)
dirty_cpu_scheds_online = schdlrs_onln;
-#endif
+ if (dirty_cpu_scheds_online < 1)
+ dirty_cpu_scheds_online = 1;
}
-#ifndef USE_THREADS
- erts_async_max_threads = 0;
-#endif
-#ifdef ERTS_SMP
no_schedulers = schdlrs;
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
-#endif
erts_early_init_scheduling(no_schedulers);
alloc_opts.ncpu = ncpu;
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
-#ifdef ERTS_SMP
+
+ erts_init_check_io(argc, argv);
+
/*
* Thread progress management:
*
@@ -1161,22 +1141,18 @@ early_init(int *argc, char **argv) /*
* ** Scheduler threads (see erl_process.c)
* ** Aux thread (see erl_process.c)
* ** Sys message dispatcher thread (see erl_trace.c)
+ * ** IO Poll threads (see erl_check_io.c)
*
* * Unmanaged threads that need to register:
* ** Async threads (see erl_async.c)
* ** Dirty scheduler threads
*/
erts_thr_progress_init(no_schedulers,
- no_schedulers+2,
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_async_max_threads
-#else
+ no_schedulers+2+erts_no_poll_threads,
erts_async_max_threads +
erts_no_dirty_cpu_schedulers +
erts_no_dirty_io_schedulers
-#endif
);
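/* Editorial sketch of the registration counts passed above; the
 * struct and helper are illustrative, not part of the patch. */
typedef struct {
    int schedulers, poll_threads;           /* managed threads */
    int async_threads, dirty_cpu, dirty_io; /* unmanaged threads */
} thr_counts;

static void thr_progress_counts(const thr_counts *c,
                                int *managed, int *unmanaged)
{
    /* "+2" accounts for the aux thread and the sys message dispatcher */
    *managed   = c->schedulers + 2 + c->poll_threads;
    *unmanaged = c->async_threads + c->dirty_cpu + c->dirty_io;
}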
-#endif
erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
@@ -1184,7 +1160,6 @@ early_init(int *argc, char **argv) /*
max_reader_groups,
&reader_groups);
-#ifdef USE_THREADS
{
erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
elid.mem.std.alloc = ethr_std_alloc;
@@ -1201,7 +1176,6 @@ early_init(int *argc, char **argv) /*
erts_thr_late_init(&elid);
}
-#endif
erts_msacc_early_init();
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1228,26 +1202,6 @@ early_init(int *argc, char **argv) /*
return ncpu;
}
-#ifndef ERTS_SMP
-static void set_main_stack_size(void)
-{
- if (erts_sched_thread_suggested_stack_size > 0) {
-# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
- struct rlimit rl;
- int bytes = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
- if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
- (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
- erts_fprintf(stderr, "failed to set stack size for scheduler "
- "thread to %d bytes\n", bytes);
- erts_usage();
- }
-# else
- erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
- erts_usage();
-# endif
- }
-}
-#endif
void
erl_start(int argc, char **argv)
@@ -1267,36 +1221,36 @@ erl_start(int argc, char **argv)
ErtsTimeWarpMode time_warp_mode;
int node_tab_delete_delay = ERTS_NODE_TAB_DELAY_GC_DEFAULT;
ErtsDbSpinCount db_spin_count = ERTS_DB_SPNCNT_NORMAL;
- Eterm otp_ring0_pid;
set_default_time_adj(&time_correction,
&time_warp_mode);
envbufsz = sizeof(envbuf);
- if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
+ if (erts_sys_explicit_8bit_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 1)
user_requested_db_max_tabs = atoi(envbuf);
else
user_requested_db_max_tabs = 0;
envbufsz = sizeof(envbuf);
- if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
+ if (erts_sys_explicit_8bit_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 1) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
+ erts_atomic32_set_nob(&erts_max_gen_gcs,
(erts_aint32_t) max_gen_gcs);
}
envbufsz = sizeof(envbuf);
- if (erts_sys_getenv_raw("ERL_MAX_PORTS", envbuf, &envbufsz) == 0) {
+ if (erts_sys_explicit_8bit_getenv("ERL_MAX_PORTS", envbuf, &envbufsz) == 1) {
port_tab_sz = atoi(envbuf);
port_tab_sz_ignore_files = 1;
}
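/* Editorial sketch: the new helper reports success with 1 (the old
 * erts_sys_getenv_raw() reported success with 0), hence the flipped
 * tests above. Exact buffer/size types here are assumptions. */
char buf[21];
size_t bufsz = sizeof(buf);
if (erts_sys_explicit_8bit_getenv("ERL_MAX_PORTS", buf, &bufsz) == 1) {
    /* variable was set and fit in buf; parse it */
    int ports = atoi(buf);
    (void)ports;
}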
-#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
/*
- * The default stack size on MacOS X is too small for pcre.
+ * A default stack size suitable for pcre which might use quite
+ * a lot of stack.
*/
- erts_sched_thread_suggested_stack_size = 256;
-#endif
+ erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
+ erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
#ifdef DEBUG
verbose = DEBUG_DEFAULT;
@@ -1308,7 +1262,7 @@ erl_start(int argc, char **argv)
if (argv[i][0] != '-') {
erts_usage();
}
- if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
+ if (sys_strcmp(argv[i], "--") == 0) { /* end of emulator options */
i++;
break;
}
@@ -1336,12 +1290,12 @@ erl_start(int argc, char **argv)
("using display items %d\n",display_items));
break;
case 'p':
- if (!strncmp(argv[i],"-pc",3)) {
+ if (!sys_strncmp(argv[i],"-pc",3)) {
int printable_chars = ERL_PRINTABLE_CHARACTERS_LATIN1;
arg = get_arg(argv[i]+3, argv[i+1], &i);
- if (!strcmp(arg,"unicode")) {
+ if (!sys_strcmp(arg,"unicode")) {
printable_chars = ERL_PRINTABLE_CHARACTERS_UNICODE;
- } else if (strcmp(arg,"latin1")) {
+ } else if (sys_strcmp(arg,"latin1")) {
erts_fprintf(stderr, "bad range of printable "
"characters: %s\n", arg);
erts_usage();
@@ -1353,7 +1307,7 @@ erl_start(int argc, char **argv)
erts_usage();
}
case 'f':
- if (!strncmp(argv[i],"-fn",3)) {
+ if (!sys_strncmp(argv[i],"-fn",3)) {
int warning_type = ERL_FILENAME_WARNING_WARNING;
arg = get_arg(argv[i]+3, argv[i+1], &i);
switch (*arg) {
@@ -1464,12 +1418,8 @@ erl_start(int argc, char **argv)
#ifdef DEBUG
strcat(tmp, ",DEBUG");
#endif
-#ifdef ERTS_SMP
strcat(tmp, ",SMP");
-#endif
-#ifdef USE_THREADS
strcat(tmp, ",ASYNC_THREADS");
-#endif
#ifdef HIPE
strcat(tmp, ",HIPE");
#endif
@@ -1540,9 +1490,9 @@ erl_start(int argc, char **argv)
}
} else if (has_prefix("maxk", sub_param)) {
arg = get_arg(sub_param+4, argv[i+1], &i);
- if (strcmp(arg,"true") == 0) {
+ if (sys_strcmp(arg,"true") == 0) {
H_MAX_FLAGS |= MAX_HEAP_SIZE_KILL;
- } else if (strcmp(arg,"false") == 0) {
+ } else if (sys_strcmp(arg,"false") == 0) {
H_MAX_FLAGS &= ~MAX_HEAP_SIZE_KILL;
} else {
erts_fprintf(stderr, "bad max heap kill %s\n", arg);
@@ -1551,9 +1501,9 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM, ("using max heap kill %d\n", H_MAX_FLAGS));
} else if (has_prefix("maxel", sub_param)) {
arg = get_arg(sub_param+5, argv[i+1], &i);
- if (strcmp(arg,"true") == 0) {
+ if (sys_strcmp(arg,"true") == 0) {
H_MAX_FLAGS |= MAX_HEAP_SIZE_LOG;
- } else if (strcmp(arg,"false") == 0) {
+ } else if (sys_strcmp(arg,"false") == 0) {
H_MAX_FLAGS &= ~MAX_HEAP_SIZE_LOG;
} else {
erts_fprintf(stderr, "bad max heap error logger %s\n", arg);
@@ -1645,16 +1595,6 @@ erl_start(int argc, char **argv)
have_break_handler = 0;
break;
- case 'K':
- /* If kernel poll support is present,
- erl_sys_args() will remove the K parameter
- and value */
- get_arg(argv[i]+2, argv[i+1], &i);
- erts_fprintf(stderr,
- "kernel-poll not supported; \"K\" parameter ignored\n",
- arg);
- break;
-
case 'n':
arg = get_arg(argv[i]+2, argv[i+1], &i);
switch (arg[0]) {
@@ -1681,7 +1621,7 @@ erl_start(int argc, char **argv)
case 'P': /* set maximum number of processes */
arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (strcmp(arg, "legacy") == 0)
+ if (sys_strcmp(arg, "legacy") == 0)
legacy_proc_tab = 1;
else {
errno = 0;
@@ -1697,7 +1637,7 @@ erl_start(int argc, char **argv)
case 'Q': /* set maximum number of ports */
arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (strcmp(arg, "legacy") == 0)
+ if (sys_strcmp(arg, "legacy") == 0)
legacy_port_tab = 1;
else {
errno = 0;
@@ -1715,11 +1655,11 @@ erl_start(int argc, char **argv)
case 'S' : /* Was handled in early_init() just read past it */
if (argv[i][2] == 'D') {
char* type = argv[i]+3;
- if (strcmp(type, "Pcpu") == 0)
+ if (sys_strcmp(type, "Pcpu") == 0)
(void) get_arg(argv[i]+7, argv[i+1], &i);
- if (strcmp(type, "cpu") == 0)
+ if (sys_strcmp(type, "cpu") == 0)
(void) get_arg(argv[i]+6, argv[i+1], &i);
- else if (strcmp(type, "io") == 0)
+ else if (sys_strcmp(type, "io") == 0)
(void) get_arg(argv[i]+5, argv[i+1], &i);
} else if (argv[i][2] == 'P')
(void) get_arg(argv[i]+3, argv[i+1], &i);
@@ -1756,15 +1696,41 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("bwtdcpu", sub_param)) {
+ arg = get_arg(sub_param + 7, argv[i+1], &i);
+
+ if (erts_sched_set_busy_wait_threshold(ERTS_SCHED_DIRTY_CPU, arg) != 0) {
+ erts_fprintf(stderr, "bad dirty CPU scheduler busy wait threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("dirty CPU scheduler wakeup threshold: %s\n", arg));
+ }
+ else if (has_prefix("bwtdio", sub_param)) {
+ arg = get_arg(sub_param + 6, argv[i+1], &i);
+
+ if (erts_sched_set_busy_wait_threshold(ERTS_SCHED_DIRTY_IO, arg) != 0) {
+ erts_fprintf(stderr, "bad dirty IO scheduler busy wait threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("dirty IO scheduler wakeup threshold: %s\n", arg));
+ }
else if (has_prefix("bwt", sub_param)) {
- arg = get_arg(sub_param+3, argv[i+1], &i);
- if (erts_sched_set_busy_wait_threshold(arg) != 0) {
+ arg = get_arg(sub_param + 3, argv[i+1], &i);
+
+ if (erts_sched_set_busy_wait_threshold(ERTS_SCHED_NORMAL, arg) != 0) {
erts_fprintf(stderr, "bad scheduler busy wait threshold: %s\n",
arg);
erts_usage();
}
+
VERBOSE(DEBUG_SYSTEM,
- ("scheduler wakup threshold: %s\n", arg));
+ ("scheduler wakeup threshold: %s\n", arg));
}
else if (has_prefix("cl", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
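/* Editorial note on the "+sbwt*" dispatch above: the longer prefixes
 * ("bwtdcpu", "bwtdio") must be tested before plain "bwt", because a
 * prefix match on "bwt" would accept those strings too. This assumes
 * has_prefix() behaves like the following illustrative helper: */
static int has_prefix(const char *prefix, const char *string)
{
    while (*prefix)
        if (*prefix++ != *string++)
            return 0;
    return 1; /* so has_prefix("bwt", "bwtdio...") is also true */
}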
@@ -1824,22 +1790,10 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (has_prefix("ecio", sub_param)) {
- arg = get_arg(sub_param+4, argv[i+1], &i);
-#ifndef __OSE__
- if (sys_strcmp("true", arg) == 0)
- erts_eager_check_io = 1;
- else
-#endif
- if (sys_strcmp("false", arg) == 0)
- erts_eager_check_io = 0;
- else {
- erts_fprintf(stderr,
- "bad schedule eager check I/O value '%s'\n",
- arg);
- erts_usage();
- }
- }
+ else if (has_prefix("ecio", sub_param)) {
+                        /* ignore the argument; eager check I/O is no longer used */
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+ }
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (sys_strcmp(arg, "true") == 0)
@@ -1853,8 +1807,6 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("nsp", sub_param) == 0)
- erts_use_sender_punish = 0;
else if (has_prefix("tbt", sub_param)) {
arg = get_arg(sub_param+3, argv[i+1], &i);
res = erts_init_scheduler_bind_type_string(arg);
@@ -1895,9 +1847,29 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wake cleanup threshold: %s\n", arg));
}
+ else if (has_prefix("wtdcpu", sub_param)) {
+ arg = get_arg(sub_param+6, argv[i+1], &i);
+ if (erts_sched_set_wakeup_other_threshold(ERTS_SCHED_DIRTY_CPU, arg) != 0) {
+ erts_fprintf(stderr, "dirty CPU scheduler wakeup threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("dirty CPU scheduler wakeup threshold: %s\n", arg));
+ }
+ else if (has_prefix("wtdio", sub_param)) {
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ if (erts_sched_set_wakeup_other_threshold(ERTS_SCHED_DIRTY_IO, arg) != 0) {
+ erts_fprintf(stderr, "dirty IO scheduler wakeup threshold: %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("dirty IO scheduler wakeup threshold: %s\n", arg));
+ }
else if (has_prefix("wt", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
- if (erts_sched_set_wakeup_other_thresold(arg) != 0) {
+ if (erts_sched_set_wakeup_other_threshold(ERTS_SCHED_NORMAL, arg) != 0) {
erts_fprintf(stderr, "scheduler wakeup threshold: %s\n",
arg);
erts_usage();
@@ -1907,7 +1879,7 @@ erl_start(int argc, char **argv)
}
else if (has_prefix("ws", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
- if (erts_sched_set_wakeup_other_type(arg) != 0) {
+ if (erts_sched_set_wakeup_other_type(ERTS_SCHED_NORMAL, arg) != 0) {
erts_fprintf(stderr, "scheduler wakeup strategy: %s\n",
arg);
erts_usage();
@@ -1915,10 +1887,45 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
+ else if (has_prefix("ssdcpu", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
+ arg = get_arg(sub_param+6, argv[i+1], &i);
+ erts_dcpu_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dcpu_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dcpu_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty CPU scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty CPU scheduler thread stack size %d kilo words\n",
+ erts_dcpu_sched_thread_suggested_stack_size));
+ }
+ else if (has_prefix("ssdio", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty IO scheduler threads */
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ erts_dio_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dio_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dio_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty IO scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty IO scheduler thread stack size %d kilo words\n",
+ erts_dio_sched_thread_suggested_stack_size));
+ }
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
arg = get_arg(sub_param+2, argv[i+1], &i);
erts_sched_thread_suggested_stack_size = atoi(arg);
+ modified_sched_thread_suggested_stack_size = 1;
if ((erts_sched_thread_suggested_stack_size
< ERTS_SCHED_THREAD_MIN_STACK_SIZE)
@@ -1944,9 +1951,7 @@ erl_start(int argc, char **argv)
arg);
erts_usage();
}
-#ifdef ERTS_SMP
erts_runq_supervision_interval = val;
-#endif
}
else {
erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
@@ -2223,12 +2228,18 @@ erl_start(int argc, char **argv)
init_break_handler();
if (replace_intr)
erts_replace_intr();
- sys_init_suspend_handler();
#endif
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
+ if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+
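/* Editorial sketch: the suggested stack sizes are given in kilo-words,
 * so the byte count a thread eventually requests follows the same
 * arithmetic the removed set_main_stack_size() used (illustrative
 * helper, not part of the patch). */
static size_t sched_stack_bytes(int kilo_words)
{
    return (size_t)kilo_words * 1024 * sizeof(Uint); /* words -> bytes */
}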
erl_init(ncpu,
proc_tab_sz,
legacy_proc_tab,
@@ -2246,8 +2257,8 @@ erl_start(int argc, char **argv)
erts_initialized = 1;
- otp_ring0_pid = erl_first_process_otp("otp_ring0", NULL, 0,
- boot_argc, boot_argv);
+ erts_init_process_id = erl_first_process_otp("otp_ring0", NULL, 0,
+ boot_argc, boot_argv);
{
/*
@@ -2257,58 +2268,67 @@ erl_start(int argc, char **argv)
*/
Eterm pid;
- pid = erl_system_process_otp(otp_ring0_pid, "erts_code_purger");
+ pid = erl_system_process_otp(erts_init_process_id,
+ "erts_code_purger", !0,
+ PRIORITY_NORMAL);
erts_code_purger
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
ASSERT(erts_code_purger && erts_code_purger->common.id == pid);
erts_proc_inc_refc(erts_code_purger);
-#ifdef ERTS_NEW_PURGE_STRATEGY
- pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector");
+ pid = erl_system_process_otp(erts_init_process_id,
+ "erts_literal_area_collector",
+ !0, PRIORITY_NORMAL);
erts_literal_area_collector
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
ASSERT(erts_literal_area_collector
&& erts_literal_area_collector->common.id == pid);
erts_proc_inc_refc(erts_literal_area_collector);
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
- pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker");
- erts_dirty_process_code_checker
+ pid = erl_system_process_otp(erts_init_process_id,
+ "erts_dirty_process_signal_handler",
+ !0, PRIORITY_NORMAL);
+ erts_dirty_process_signal_handler
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
internal_pid_index(pid));
- ASSERT(erts_dirty_process_code_checker
- && erts_dirty_process_code_checker->common.id == pid);
- erts_proc_inc_refc(erts_dirty_process_code_checker);
-#endif
-
+ ASSERT(erts_dirty_process_signal_handler
+ && erts_dirty_process_signal_handler->common.id == pid);
+ erts_proc_inc_refc(erts_dirty_process_signal_handler);
+
+ pid = erl_system_process_otp(erts_init_process_id,
+ "erts_dirty_process_signal_handler",
+ !0, PRIORITY_HIGH);
+ erts_dirty_process_signal_handler_high
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_dirty_process_signal_handler_high
+ && erts_dirty_process_signal_handler_high->common.id == pid);
+ erts_proc_inc_refc(erts_dirty_process_signal_handler_high);
+
+ pid = erl_system_process_otp(erts_init_process_id,
+ "erts_dirty_process_signal_handler",
+ !0, PRIORITY_MAX);
+ erts_dirty_process_signal_handler_max
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_dirty_process_signal_handler_max
+ && erts_dirty_process_signal_handler_max->common.id == pid);
+ erts_proc_inc_refc(erts_dirty_process_signal_handler_max);
}
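/* Editorial sketch of the spawn-and-pin pattern repeated above for
 * each system process (the helper itself is not in the patch). */
static Process *spawn_system_proc(Eterm parent, char *name, int prio)
{
    Eterm pid = erl_system_process_otp(parent, name, !0, prio);
    Process *p = (Process *)
        erts_ptab_pix2intptr_ddrb(&erts_proc, internal_pid_index(pid));
    erts_proc_inc_refc(p); /* pin the Process struct */
    return p;
}
/* The three erts_dirty_process_signal_handler instances differ only
 * in priority: normal, high and max. */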
-#ifdef ERTS_SMP
erts_start_schedulers();
- /* Let system specific code decide what to do with the main thread... */
- erts_sys_main_thread(); /* May or may not return! */
-#else
- {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- erts_msacc_init_thread("scheduler", 1, 1);
- erts_thr_set_main_status(1, 1);
-#if ERTS_USE_ASYNC_READY_Q
- esdp->aux_work_data.async_ready.queue
- = erts_get_async_ready_queue(1);
-#endif
- set_main_stack_size();
- erts_sched_init_time_sup(esdp);
- process_main();
- }
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
#endif
+
+ /* Let system specific code decide what to do with the main thread... */
+ erts_sys_main_thread(); /* May or may not return! */
}
-#ifdef USE_THREADS
__decl_noreturn void erts_thr_fatal_error(int err, char *what)
{
@@ -2322,7 +2342,6 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
abort();
}
-#endif
static void
system_cleanup(int flush_async)
@@ -2335,16 +2354,14 @@ system_cleanup(int flush_async)
* Another thread is currently exiting the system;
* wait for it to do its job.
*/
-#ifdef ERTS_SMP
if (erts_thr_progress_is_managed_thread()) {
/*
* The exiting thread might be waiting for
* us to block; need to update status...
*/
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(erts_thr_prgr_data(NULL), 0);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
}
-#endif
/* Wait forever... */
while (1)
erts_milli_sleep(10000000);
@@ -2359,21 +2376,18 @@ system_cleanup(int flush_async)
if (!flush_async
|| !erts_initialized
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- || !erts_equal_tids(erts_main_thread, erts_thr_self())
-#endif
)
return;
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
-#endif
erts_exit_flush_async();
}
+static int erts_exit_code;
+
static __decl_noreturn void __noreturn
erts_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
{
@@ -2385,12 +2399,21 @@ erts_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
if (fmt != NULL && *fmt != '\0')
erl_error(fmt, args2); /* Print error message. */
- /* Produce an Erlang core dump if error */
+ erts_exit_code = n;
+
+ /* Produce an Erlang crash dump if error */
if (((n == ERTS_ERROR_EXIT && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
&& erts_initialized) {
erl_crash_dump_v((char*) NULL, 0, fmt, args1);
}
+ erts_exit_epilogue();
+}
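/* Editorial note as code: stashing the code in a static splits the
 * shutdown into two steps (names from the patch above):
 *
 *   erts_exit_code = n;      -- in erts_exit_vv(), before the dump
 *   erts_exit_epilogue();    -- tty reset + final exit, noreturn
 *
 * so the tail of the sequence can also be invoked separately. */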
+
+__decl_noreturn void __noreturn erts_exit_epilogue(void)
+{
+ int n = erts_exit_code;
+
sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
deleted file mode 100644
index f84c63e7a4..0000000000
--- a/erts/emulator/beam/erl_instrument.c
+++ /dev/null
@@ -1,1277 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "global.h"
-#include "big.h"
-#include "erl_instrument.h"
-#include "erl_threads.h"
-
-typedef union { long l; double d; } Align_t;
-
-typedef struct {
- Uint size;
-#ifdef VALGRIND
- void* valgrind_leak_suppressor;
-#endif
- Align_t mem[1];
-} StatBlock_t;
-
-#define STAT_BLOCK_HEADER_SIZE (sizeof(StatBlock_t) - sizeof(Align_t))
-
-typedef struct MapStatBlock_t_ MapStatBlock_t;
-struct MapStatBlock_t_ {
- Uint size;
- ErtsAlcType_t type_no;
- Eterm pid;
- MapStatBlock_t *prev;
- MapStatBlock_t *next;
- Align_t mem[1];
-};
-
-#define MAP_STAT_BLOCK_HEADER_SIZE (sizeof(MapStatBlock_t) - sizeof(Align_t))
-
-typedef struct {
- Uint size;
- Uint max_size;
- Uint max_size_ever;
-
- Uint blocks;
- Uint max_blocks;
- Uint max_blocks_ever;
-} Stat_t;
-
-static erts_mtx_t instr_mutex;
-static erts_mtx_t instr_x_mutex;
-
-int erts_instr_memory_map;
-int erts_instr_stat;
-
-static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
-
-struct stats_ {
- Stat_t tot;
- Stat_t a[ERTS_ALC_A_MAX+1];
- Stat_t *ap[ERTS_ALC_A_MAX+1];
- Stat_t c[ERTS_ALC_C_MAX+1];
- Stat_t n[ERTS_ALC_N_MAX+1];
-};
-
-static struct stats_ *stats;
-
-static MapStatBlock_t *mem_anchor;
-
-static Eterm *am_tot;
-static Eterm *am_n;
-static Eterm *am_a;
-static Eterm *am_c;
-
-static int atoms_initialized;
-
-static struct {
- Eterm total;
- Eterm allocators;
- Eterm classes;
- Eterm types;
- Eterm sizes;
- Eterm blocks;
- Eterm instr_hdr;
-#ifdef DEBUG
- Eterm end_of_atoms;
-#endif
-} am;
-
-static void ERTS_INLINE atom_init(Eterm *atom, const char *name)
-{
- *atom = am_atom_put((char *) name, strlen(name));
-}
-#define AM_INIT(AM) atom_init(&am.AM, #AM)
-
-static void
-init_atoms(void)
-{
-#ifdef DEBUG
- Eterm *atom;
- for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
- *atom = THE_NON_VALUE;
- }
-#endif
-
- AM_INIT(total);
- AM_INIT(allocators);
- AM_INIT(classes);
- AM_INIT(types);
- AM_INIT(sizes);
- AM_INIT(blocks);
- AM_INIT(instr_hdr);
-
-#ifdef DEBUG
- for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
- ASSERT(*atom != THE_NON_VALUE);
- }
-#endif
-
- atoms_initialized = 1;
-}
-
-#undef AM_INIT
-
-static void
-init_am_tot(void)
-{
- am_tot = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- sizeof(Eterm));
- atom_init(am_tot, "total");
-}
-
-
-static void
-init_am_n(void)
-{
- int i;
- am_n = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_N_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
- atom_init(&am_n[i], ERTS_ALC_N2TD(i));
- }
-
-}
-
-static void
-init_am_c(void)
-{
- int i;
- am_c = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_C_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
- atom_init(&am_c[i], ERTS_ALC_C2CD(i));
- }
-
-}
-
-static void
-init_am_a(void)
-{
- int i;
- am_a = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
- (ERTS_ALC_A_MAX+1)*sizeof(Eterm));
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- atom_init(&am_a[i], ERTS_ALC_A2AD(i));
- }
-
-}
-
-static ERTS_INLINE void
-stat_upd_alloc(ErtsAlcType_t n, Uint size)
-{
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- stats->ap[a]->size += size;
- if (stats->ap[a]->max_size < stats->ap[a]->size)
- stats->ap[a]->max_size = stats->ap[a]->size;
-
- stats->c[c].size += size;
- if (stats->c[c].max_size < stats->c[c].size)
- stats->c[c].max_size = stats->c[c].size;
-
- stats->n[n].size += size;
- if (stats->n[n].max_size < stats->n[n].size)
- stats->n[n].max_size = stats->n[n].size;
-
- stats->tot.size += size;
- if (stats->tot.max_size < stats->tot.size)
- stats->tot.max_size = stats->tot.size;
-
- stats->ap[a]->blocks++;
- if (stats->ap[a]->max_blocks < stats->ap[a]->blocks)
- stats->ap[a]->max_blocks = stats->ap[a]->blocks;
-
- stats->c[c].blocks++;
- if (stats->c[c].max_blocks < stats->c[c].blocks)
- stats->c[c].max_blocks = stats->c[c].blocks;
-
- stats->n[n].blocks++;
- if (stats->n[n].max_blocks < stats->n[n].blocks)
- stats->n[n].max_blocks = stats->n[n].blocks;
-
- stats->tot.blocks++;
- if (stats->tot.max_blocks < stats->tot.blocks)
- stats->tot.max_blocks = stats->tot.blocks;
-
-}
-
-
-static ERTS_INLINE void
-stat_upd_free(ErtsAlcType_t n, Uint size)
-{
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- ASSERT(stats->ap[a]->size >= size);
- stats->ap[a]->size -= size;
-
- ASSERT(stats->c[c].size >= size);
- stats->c[c].size -= size;
-
- ASSERT(stats->n[n].size >= size);
- stats->n[n].size -= size;
-
- ASSERT(stats->tot.size >= size);
- stats->tot.size -= size;
-
- ASSERT(stats->ap[a]->blocks > 0);
- stats->ap[a]->blocks--;
-
- ASSERT(stats->c[c].blocks > 0);
- stats->c[c].blocks--;
-
- ASSERT(stats->n[n].blocks > 0);
- stats->n[n].blocks--;
-
- ASSERT(stats->tot.blocks > 0);
- stats->tot.blocks--;
-
-}
-
-
-static ERTS_INLINE void
-stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size)
-{
- if (old_size)
- stat_upd_free(n, old_size);
- stat_upd_alloc(n, size);
-}
-
-/*
- * stat instrumentation callback functions
- */
-
-static void stat_pre_lock(void)
-{
- erts_mtx_lock(&instr_mutex);
-}
-
-static void stat_pre_unlock(void)
-{
- erts_mtx_unlock(&instr_mutex);
-}
-
-static ErtsAllocatorWrapper_t instr_wrapper;
-
-static void *
-stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint ssize;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- ssize = size + STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->alloc)(n, real_af->extra, ssize);
- if (res) {
- stat_upd_alloc(n, size);
- ((StatBlock_t *) res)->size = size;
-#ifdef VALGRIND
- /* Suppress "possibly leaks" by storing an actual dummy pointer
- to the _start_ of the allocated block.*/
- ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
-#endif
- res = (void *) ((StatBlock_t *) res)->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void *
-stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint old_size;
- Uint ssize;
- void *sptr;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
- old_size = ((StatBlock_t *) sptr)->size;
- }
- else {
- sptr = NULL;
- old_size = 0;
- }
-
- ssize = size + STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->realloc)(n, real_af->extra, sptr, ssize);
- if (res) {
- stat_upd_realloc(n, size, old_size);
- ((StatBlock_t *) res)->size = size;
-#ifdef VALGRIND
- ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
-#endif
- res = (void *) ((StatBlock_t *) res)->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void
-stat_free(ErtsAlcType_t n, void *extra, void *ptr)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- void *sptr;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
- stat_upd_free(n, ((StatBlock_t *) sptr)->size);
- }
- else {
- sptr = NULL;
- }
-
- (*real_af->free)(n, real_af->extra, sptr);
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
-}
-
-/*
- * map stat instrumentation callback functions
- */
-
-static void map_stat_pre_lock(void)
-{
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
-}
-
-static void map_stat_pre_unlock(void)
-{
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
-}
-
-static void *
-map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint msize;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_mutex);
- }
-
- msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->alloc)(n, real_af->extra, msize);
- if (res) {
- MapStatBlock_t *mb = (MapStatBlock_t *) res;
- stat_upd_alloc(n, size);
-
- mb->size = size;
- mb->type_no = n;
- mb->pid = erts_get_current_pid();
-
- mb->prev = NULL;
- mb->next = mem_anchor;
- if (mem_anchor)
- mem_anchor->prev = mb;
- mem_anchor = mb;
-
- res = (void *) mb->mem;
- }
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- }
-
- return res;
-}
-
-static void *
-map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- Uint old_size;
- Uint msize;
- void *mptr;
- void *res;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
- old_size = ((MapStatBlock_t *) mptr)->size;
- }
- else {
- mptr = NULL;
- old_size = 0;
- }
-
- msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
- res = (*real_af->realloc)(n, real_af->extra, mptr, msize);
- if (res) {
- MapStatBlock_t *mb = (MapStatBlock_t *) res;
-
- mb->size = size;
- mb->type_no = n;
- mb->pid = erts_get_current_pid();
-
- stat_upd_realloc(n, size, old_size);
-
- if (mptr != res) {
-
- if (mptr) {
- if (mb->prev)
- mb->prev->next = mb;
- else {
- ASSERT(mem_anchor == (MapStatBlock_t *) mptr);
- mem_anchor = mb;
- }
- if (mb->next)
- mb->next->prev = mb;
- }
- else {
- mb->prev = NULL;
- mb->next = mem_anchor;
- if (mem_anchor)
- mem_anchor->prev = mb;
- mem_anchor = mb;
- }
-
- }
-
- res = (void *) mb->mem;
- }
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
- }
-
- return res;
-}
-
-static void
-map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
-{
- ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
- void *mptr;
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
- }
-
- if (ptr) {
- MapStatBlock_t *mb;
-
- mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
- mb = (MapStatBlock_t *) mptr;
-
- stat_upd_free(n, mb->size);
-
- if (mb->prev)
- mb->prev->next = mb->next;
- else
- mem_anchor = mb->next;
- if (mb->next)
- mb->next->prev = mb->prev;
- }
- else {
- mptr = NULL;
- }
-
- (*real_af->free)(n, real_af->extra, mptr);
-
- if (!erts_is_allctr_wrapper_prelocked()) {
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
- }
-
-}
-
-static void dump_memory_map_to_stream(FILE *fp)
-{
- ErtsAlcType_t n;
- MapStatBlock_t *bp;
- int lock = !ERTS_IS_CRASH_DUMPING;
- if (lock) {
- ASSERT(!erts_is_allctr_wrapper_prelocked());
- erts_mtx_lock(&instr_mutex);
- }
-
- /* Write header */
-
- fprintf(fp,
- "{instr_hdr,\n"
- " %lu,\n"
- " %lu,\n"
- " {",
- (unsigned long) ERTS_INSTR_VSN,
- (unsigned long) MAP_STAT_BLOCK_HEADER_SIZE);
-
-#if ERTS_ALC_N_MIN != 1
-#error ERTS_ALC_N_MIN is not 1
-#endif
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
- const char *astr;
-
- if (erts_allctrs_info[a].enabled)
- astr = ERTS_ALC_A2AD(a);
- else
- astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
-
- fprintf(fp,
- "%s{%s,%s,%s}%s",
- (n == ERTS_ALC_N_MIN) ? "" : " ",
- ERTS_ALC_N2TD(n),
- astr,
- ERTS_ALC_C2CD(c),
- (n == ERTS_ALC_N_MAX) ? "" : ",\n");
- }
-
- fprintf(fp, "}}.\n");
-
- /* Write memory data */
- for (bp = mem_anchor; bp; bp = bp->next) {
- if (is_internal_pid(bp->pid))
- fprintf(fp,
- "{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
- (UWord) bp->type_no,
- (UWord) bp->mem,
- (UWord) bp->size,
- (UWord) pid_channel_no(bp->pid),
- (UWord) pid_number(bp->pid),
- (UWord) pid_serial(bp->pid));
- else
- fprintf(fp,
- "{%lu, %lu, %lu, undefined}.\n",
- (UWord) bp->type_no,
- (UWord) bp->mem,
- (UWord) bp->size);
- }
-
- if (lock)
- erts_mtx_unlock(&instr_mutex);
-}
-
-int erts_instr_dump_memory_map_to_fd(int fd)
-{
- char buf[BUFSIZ];
- FILE *f;
-
- if (!erts_instr_memory_map)
- return 0;
-
- f = fdopen(fd, "w");
- if (f == NULL)
- return 0;
-
- /* Avoid allocating memory; we may have run out of it at this point. */
- setbuf(f, buf);
-
- dump_memory_map_to_stream(f);
- fflush(f);
- return 1;
-}
-
-int erts_instr_dump_memory_map(const char *name)
-{
- FILE *f;
-
- if (!erts_instr_memory_map)
- return 0;
-
- f = fopen(name, "w");
- if (f == NULL)
- return 0;
-
- dump_memory_map_to_stream(f);
-
- fclose(f);
- return 1;
-}
-
-Eterm erts_instr_get_memory_map(Process *proc)
-{
- MapStatBlock_t *org_mem_anchor;
- Eterm hdr_tuple, md_list, res;
- Eterm *hp;
- Uint hsz;
- MapStatBlock_t *bp;
-#ifdef DEBUG
- Eterm *end_hp;
-#endif
-
- if (!erts_instr_memory_map)
- return am_false;
-
- if (!atoms_initialized)
- init_atoms();
- if (!am_n)
- init_am_n();
- if (!am_c)
- init_am_c();
- if (!am_a)
- init_am_a();
-
- erts_mtx_lock(&instr_x_mutex);
- erts_mtx_lock(&instr_mutex);
-
- /* Header size */
- hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4);
-
- /* Memory data list */
- for (bp = mem_anchor; bp; bp = bp->next) {
- if (is_internal_pid(bp->pid)) {
-#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
- if (internal_pid_number(bp->pid) > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-#endif
-#if (_PID_SER_SIZE - 1 > MAX_SMALL)
- if (internal_pid_serial(bp->pid) > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-#endif
- hsz += 4;
- }
-
- if ((UWord) bp->mem > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
- if (bp->size > MAX_SMALL)
- hsz += BIG_UINT_HEAP_SIZE;
-
- hsz += 5 + 2;
- }
-
- hsz += 3; /* Root tuple */
-
- org_mem_anchor = mem_anchor;
- mem_anchor = NULL;
-
- erts_mtx_unlock(&instr_mutex);
-
- hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */
-
- erts_mtx_lock(&instr_mutex);
-
-#ifdef DEBUG
- end_hp = hp + hsz;
-#endif
-
- { /* Build header */
- ErtsAlcType_t n;
- Eterm type_map;
- Uint *hp2 = hp;
-#ifdef DEBUG
- Uint *hp2_end;
-#endif
-
- hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4;
-
-#ifdef DEBUG
- hp2_end = hp;
-#endif
-
- type_map = make_tuple(hp);
- *(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN);
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- if (!erts_allctrs_info[a].enabled)
- a = ERTS_ALC_A_SYSTEM;
-
- *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]);
- hp2 += 4;
- }
-
- ASSERT(hp2 == hp2_end);
-
- hdr_tuple = TUPLE4(hp,
- am.instr_hdr,
- make_small(ERTS_INSTR_VSN),
- make_small(MAP_STAT_BLOCK_HEADER_SIZE),
- type_map);
-
- hp += 5;
- }
-
- /* Build memory data list */
-
- for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) {
- Eterm tuple;
- Eterm type;
- Eterm ptr;
- Eterm size;
- Eterm pid;
-
- if (is_not_internal_pid(bp->pid))
- pid = am_undefined;
- else {
- Eterm c;
- Eterm n;
- Eterm s;
-
-#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL)
-#error Oversized internal channel number
-#endif
- c = make_small(ERST_INTERNAL_CHANNEL_NO);
-
-#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
- if (internal_pid_number(bp->pid) > MAX_SMALL) {
- n = uint_to_big(internal_pid_number(bp->pid), hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
-#endif
- n = make_small(internal_pid_number(bp->pid));
-
-#if (_PID_SER_SIZE - 1 > MAX_SMALL)
- if (internal_pid_serial(bp->pid) > MAX_SMALL) {
- s = uint_to_big(internal_pid_serial(bp->pid), hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
-#endif
- s = make_small(internal_pid_serial(bp->pid));
- pid = TUPLE3(hp, c, n, s);
- hp += 4;
- }
-
-
-#if ERTS_ALC_N_MAX > MAX_SMALL
-#error Oversized memory type number
-#endif
- type = make_small(bp->type_no);
-
- if ((UWord) bp->mem > MAX_SMALL) {
- ptr = uint_to_big((UWord) bp->mem, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
- ptr = make_small((UWord) bp->mem);
-
- if (bp->size > MAX_SMALL) {
- size = uint_to_big(bp->size, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- else
- size = make_small(bp->size);
-
- tuple = TUPLE4(hp, type, ptr, size, pid);
- hp += 5;
-
- md_list = CONS(hp, tuple, md_list);
- hp += 2;
- }
-
- res = TUPLE2(hp, hdr_tuple, md_list);
-
- ASSERT(hp + 3 == end_hp);
-
- if (mem_anchor) {
- for (bp = mem_anchor; bp->next; bp = bp->next)
- ;
- ASSERT(org_mem_anchor);
- org_mem_anchor->prev = bp;
- bp->next = org_mem_anchor;
- }
- else {
- mem_anchor = org_mem_anchor;
- }
-
- erts_mtx_unlock(&instr_mutex);
- erts_mtx_unlock(&instr_x_mutex);
-
- return res;
-}
-
-static ERTS_INLINE void
-begin_new_max_period(Stat_t *stat, int min, int max)
-{
- int i;
- for (i = min; i <= max; i++) {
- stat[i].max_size = stat[i].size;
- stat[i].max_blocks = stat[i].blocks;
- }
-}
-
-static ERTS_INLINE void
-update_max_ever_values(Stat_t *stat, int min, int max)
-{
- int i;
- for (i = min; i <= max; i++) {
- if (stat[i].max_size_ever < stat[i].max_size)
- stat[i].max_size_ever = stat[i].max_size;
- if (stat[i].max_blocks_ever < stat[i].max_blocks)
- stat[i].max_blocks_ever = stat[i].max_blocks;
- }
-}
-
-#define bld_string erts_bld_string
-#define bld_tuple erts_bld_tuple
-#define bld_tuplev erts_bld_tuplev
-#define bld_list erts_bld_list
-#define bld_2tup_list erts_bld_2tup_list
-#define bld_uint erts_bld_uint
-
-Eterm
-erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period)
-{
- int i, len, max, min, allctr;
- Eterm *names, *values, res;
- Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp;
- Stat_t *stat_src, *stat;
-
- if (!erts_instr_stat)
- return am_false;
-
- if (!atoms_initialized)
- init_atoms();
-
- if (what == am.total) {
- min = 0;
- max = 0;
- allctr = 0;
- stat_size = sizeof(Stat_t);
- stat_src = &stats->tot;
- if (!am_tot)
- init_am_tot();
- names = am_tot;
- }
- else if (what == am.allocators) {
- min = ERTS_ALC_A_MIN;
- max = ERTS_ALC_A_MAX;
- allctr = 1;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_A_MAX+1);
- stat_src = stats->a;
- if (!am_a)
- init_am_a();
- names = am_a;
- }
- else if (what == am.classes) {
- min = ERTS_ALC_C_MIN;
- max = ERTS_ALC_C_MAX;
- allctr = 0;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_C_MAX+1);
- stat_src = stats->c;
- if (!am_c)
- init_am_c();
- names = &am_c[ERTS_ALC_C_MIN];
- }
- else if (what == am.types) {
- min = ERTS_ALC_N_MIN;
- max = ERTS_ALC_N_MAX;
- allctr = 0;
- stat_size = sizeof(Stat_t)*(ERTS_ALC_N_MAX+1);
- stat_src = stats->n;
- if (!am_n)
- init_am_n();
- names = &am_n[ERTS_ALC_N_MIN];
- }
- else {
- return THE_NON_VALUE;
- }
-
- stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size);
-
- arr_size = (max - min + 1)*sizeof(Eterm);
-
- if (allctr)
- names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
-
- values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
-
- erts_mtx_lock(&instr_mutex);
-
- update_max_ever_values(stat_src, min, max);
-
- sys_memcpy((void *) stat, (void *) stat_src, stat_size);
-
- if (begin_max_period)
- begin_new_max_period(stat_src, min, max);
-
- erts_mtx_unlock(&instr_mutex);
-
- hsz = 0;
- hszp = &hsz;
- hpp = NULL;
-
- restart_bld:
-
- len = 0;
- for (i = min; i <= max; i++) {
- if (!allctr || erts_allctrs_info[i].enabled) {
- Eterm s[2];
-
- if (allctr)
- names[len] = am_a[i];
-
- s[0] = bld_tuple(hpp, hszp, 4,
- am.sizes,
- bld_uint(hpp, hszp, stat[i].size),
- bld_uint(hpp, hszp, stat[i].max_size),
- bld_uint(hpp, hszp, stat[i].max_size_ever));
-
- s[1] = bld_tuple(hpp, hszp, 4,
- am.blocks,
- bld_uint(hpp, hszp, stat[i].blocks),
- bld_uint(hpp, hszp, stat[i].max_blocks),
- bld_uint(hpp, hszp, stat[i].max_blocks_ever));
-
- values[len] = bld_list(hpp, hszp, 2, s);
-
- len++;
- }
- }
-
- res = bld_2tup_list(hpp, hszp, len, names, values);
-
- if (!hpp) {
- hp = HAlloc(proc, hsz);
- hszp = NULL;
- hpp = &hp;
- goto restart_bld;
- }
-
- erts_free(ERTS_ALC_T_TMP, (void *) stat);
- erts_free(ERTS_ALC_T_TMP, (void *) values);
- if (allctr)
- erts_free(ERTS_ALC_T_TMP, (void *) names);
-
- return res;
-}
-
-static void
-dump_stat_to_stream(FILE *fp, int begin_max_period)
-{
- ErtsAlcType_t i, a_max, a_min;
-
- erts_mtx_lock(&instr_mutex);
-
- fprintf(fp,
- "{instr_vsn,%lu}.\n",
- (unsigned long) ERTS_INSTR_VSN);
-
- update_max_ever_values(&stats->tot, 0, 0);
-
- fprintf(fp,
- "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
- (UWord) stats->tot.size,
- (UWord) stats->tot.max_size,
- (UWord) stats->tot.max_size_ever,
- (UWord) stats->tot.blocks,
- (UWord) stats->tot.max_blocks,
- (UWord) stats->tot.max_blocks_ever);
-
- a_max = 0;
- a_min = ~0;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled) {
- if (a_min > i)
- a_min = i;
- if (a_max < i)
- a_max = i;
- }
- }
-
- ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX);
- ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX);
- ASSERT(a_min <= a_max);
-
- update_max_ever_values(stats->a, a_min, a_max);
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled) {
- fprintf(fp,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == a_min ? "{allocators,\n [" : " ",
- ERTS_ALC_A2AD(i),
- (UWord) stats->a[i].size,
- (UWord) stats->a[i].max_size,
- (UWord) stats->a[i].max_size_ever,
- (UWord) stats->a[i].blocks,
- (UWord) stats->a[i].max_blocks,
- (UWord) stats->a[i].max_blocks_ever,
- i == a_max ? "]}.\n" : ",\n");
- }
- }
-
- update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
-
- for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
- fprintf(fp,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ",
- ERTS_ALC_C2CD(i),
- (UWord) stats->c[i].size,
- (UWord) stats->c[i].max_size,
- (UWord) stats->c[i].max_size_ever,
- (UWord) stats->c[i].blocks,
- (UWord) stats->c[i].max_blocks,
- (UWord) stats->c[i].max_blocks_ever,
- i == ERTS_ALC_C_MAX ? "]}.\n" : ",\n" );
- }
-
- update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
-
- for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
- fprintf(fp,
- "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
- i == ERTS_ALC_N_MIN ? "{types,\n [" : " ",
- ERTS_ALC_N2TD(i),
- (UWord) stats->n[i].size,
- (UWord) stats->n[i].max_size,
- (UWord) stats->n[i].max_size_ever,
- (UWord) stats->n[i].blocks,
- (UWord) stats->n[i].max_blocks,
- (UWord) stats->n[i].max_blocks_ever,
- i == ERTS_ALC_N_MAX ? "]}.\n" : ",\n" );
- }
-
- if (begin_max_period) {
- begin_new_max_period(&stats->tot, 0, 0);
- begin_new_max_period(stats->a, a_min, a_max);
- begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
- begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
- }
-
- erts_mtx_unlock(&instr_mutex);
-
-}
-
-int erts_instr_dump_stat_to_fd(int fd, int begin_max_period)
-{
- char buf[BUFSIZ];
- FILE *fp;
-
- if (!erts_instr_stat)
- return 0;
-
- fp = fdopen(fd, "w");
- if (fp == NULL)
- return 0;
-
- /* Avoid allocating memory; we may have run out of it at this point. */
- setbuf(fp, buf);
-
- dump_stat_to_stream(fp, begin_max_period);
- fflush(fp);
- return 1;
-}
-
-int erts_instr_dump_stat(const char *name, int begin_max_period)
-{
- FILE *file;
-
- if (!erts_instr_stat)
- return 0;
-
- file = fopen(name, "w");
- if (file == NULL)
- return 0;
-
- dump_stat_to_stream(file, begin_max_period);
-
- fclose(file);
- return 1;
-}
-
-
-Uint
-erts_instr_get_total(void)
-{
- return erts_instr_stat ? stats->tot.size : 0;
-}
-
-Uint
-erts_instr_get_max_total(void)
-{
- if (erts_instr_stat) {
- update_max_ever_values(&stats->tot, 0, 0);
- return stats->tot.max_size_ever;
- }
- return 0;
-}
-
-Eterm
-erts_instr_get_type_info(Process *proc)
-{
- Eterm res, *tpls;
- Uint hsz, *hszp, *hp, **hpp;
- ErtsAlcType_t n;
-
- if (!am_n)
- init_am_n();
- if (!am_a)
- init_am_a();
- if (!am_c)
- init_am_c();
-
- tpls = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
- (ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1)
- * sizeof(Eterm));
- hsz = 0;
- hszp = &hsz;
- hpp = NULL;
-
- restart_bld:
-
-#if ERTS_ALC_N_MIN != 1
-#error ERTS_ALC_N_MIN is not 1
-#endif
-
- for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
- ErtsAlcType_t t = ERTS_ALC_N2T(n);
- ErtsAlcType_t a = ERTS_ALC_T2A(t);
- ErtsAlcType_t c = ERTS_ALC_T2C(t);
-
- if (!erts_allctrs_info[a].enabled)
- a = ERTS_ALC_A_SYSTEM;
-
- tpls[n - ERTS_ALC_N_MIN]
- = bld_tuple(hpp, hszp, 3, am_n[n], am_a[a], am_c[c]);
- }
-
- res = bld_tuplev(hpp, hszp, ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1, tpls);
-
- if (!hpp) {
- hp = HAlloc(proc, hsz);
- hszp = NULL;
- hpp = &hp;
- goto restart_bld;
- }
-
- erts_free(ERTS_ALC_T_TMP, tpls);
-
- return res;
-}
-
-Uint
-erts_instr_init(int stat, int map_stat)
-{
- Uint extra_sz;
- int i;
-
- am_tot = NULL;
- am_n = NULL;
- am_c = NULL;
- am_a = NULL;
-
- erts_instr_memory_map = 0;
- erts_instr_stat = 0;
- atoms_initialized = 0;
-
- if (!stat && !map_stat)
- return 0;
-
- stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
-
- erts_mtx_init(&instr_mutex, "instr");
-
- mem_anchor = NULL;
-
- /* Install instrumentation functions */
- ERTS_CT_ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
-
- sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
-
- sys_memzero((void *) &stats->tot, sizeof(Stat_t));
- sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1));
- sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1));
- sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_allctrs_info[i].enabled)
- stats->ap[i] = &stats->a[i];
- else
- stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM];
- }
-
- if (map_stat) {
-
- erts_mtx_init(&instr_x_mutex, "instr_x");
-
- erts_instr_memory_map = 1;
- erts_instr_stat = 1;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- erts_allctrs[i].alloc = map_stat_alloc;
- erts_allctrs[i].realloc = map_stat_realloc;
- erts_allctrs[i].free = map_stat_free;
- erts_allctrs[i].extra = (void *) &real_allctrs[i];
- }
- instr_wrapper.lock = map_stat_pre_lock;
- instr_wrapper.unlock = map_stat_pre_unlock;
- extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
- }
- else {
- erts_instr_stat = 1;
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- erts_allctrs[i].alloc = stat_alloc;
- erts_allctrs[i].realloc = stat_realloc;
- erts_allctrs[i].free = stat_free;
- erts_allctrs[i].extra = (void *) &real_allctrs[i];
- }
- instr_wrapper.lock = stat_pre_lock;
- instr_wrapper.unlock = stat_pre_unlock;
- extra_sz = STAT_BLOCK_HEADER_SIZE;
- }
- erts_allctr_wrapper_prelock_init(&instr_wrapper);
- return extra_sz;
-}
-
diff --git a/erts/emulator/beam/erl_instrument.h b/erts/emulator/beam/erl_instrument.h
deleted file mode 100644
index 1f04c91d5e..0000000000
--- a/erts/emulator/beam/erl_instrument.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifndef ERL_INSTRUMENT_H__
-#define ERL_INSTRUMENT_H__
-
-#include "erl_mtrace.h"
-
-#define ERTS_INSTR_VSN 2
-
-extern int erts_instr_memory_map;
-extern int erts_instr_stat;
-
-Uint erts_instr_init(int stat, int map_stat);
-int erts_instr_dump_memory_map_to_fd(int fd);
-int erts_instr_dump_memory_map(const char *name);
-Eterm erts_instr_get_memory_map(Process *process);
-int erts_instr_dump_stat_to_fd(int fd, int begin_max_period);
-int erts_instr_dump_stat(const char *name, int begin_max_period);
-Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period);
-Eterm erts_instr_get_type_info(Process *proc);
-Uint erts_instr_get_total(void);
-Uint erts_instr_get_max_total(void);
-
-#endif
diff --git a/erts/emulator/beam/erl_io_queue.c b/erts/emulator/beam/erl_io_queue.c
new file mode 100644
index 0000000000..2ae5b56b5c
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.c
@@ -0,0 +1,1240 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017-2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+#define ERL_WANT_HIPE_BIF_WRAPPER__
+#include "bif.h"
+#undef ERL_WANT_HIPE_BIF_WRAPPER__
+
+#include "erl_bits.h"
+#include "erl_io_queue.h"
+
+#define IOL2V_SMALL_BIN_LIMIT (ERL_ONHEAP_BIN_LIMIT * 4)
+
+static void free_binary(ErtsIOQBinary *b, int driver);
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver);
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver)
+{
+
+ ERTS_CT_ASSERT(offsetof(ErlNifIOVec,flags) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(ErlIOVec) == sizeof(ErtsIOVecCommon));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(ErlDrvSizeT));
+ ERTS_CT_ASSERT(sizeof(size_t) == sizeof(Uint));
+
+ q->alct = alct;
+ q->driver = driver;
+ q->size = 0;
+ q->v_head = q->v_tail = q->v_start = q->v_small;
+ q->v_end = q->v_small + ERTS_SMALL_IO_QUEUE;
+ q->b_head = q->b_tail = q->b_start = q->b_small;
+ q->b_end = q->b_small + ERTS_SMALL_IO_QUEUE;
+}
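/* Editorial sketch: a typical queue lifecycle. The allocator type is
 * illustrative; callers pass whichever ErtsAlcType_t they own. */
ErtsIOQueue q;
erts_ioq_init(&q, ERTS_ALC_T_IOQ, 1 /* driver-style binaries */);
/* ... erts_ioq_enqv()/erts_ioq_pushqv() fill it,
 *     erts_ioq_deq() consumes it ... */
erts_ioq_clear(&q); /* releases any binaries still referenced */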
+
+void erts_ioq_clear(ErtsIOQueue *q)
+{
+ ErtsIOQBinary** binp = q->b_head;
+ int driver = q->driver;
+
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+
+ while(binp < q->b_tail) {
+ if (*binp != NULL)
+ free_binary(*binp, driver);
+ binp++;
+ }
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
+ q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
+ q->size = 0;
+}
+
+static void free_binary(ErtsIOQBinary *b, int driver)
+{
+ if (driver)
+ driver_free_binary(&b->driver);
+ else if (erts_refc_dectest(&b->nif.intern.refc, 0) == 0)
+ erts_bin_free(&b->nif);
+}
+
+static ErtsIOQBinary *alloc_binary(Uint size, char *source, void **iov_base, int driver)
+{
+ if (driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(size);
+ if (!bin) return NULL;
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ } else {
+        /* This clause can be triggered if enif_ioq_enq_binary() is used */
+ Binary *bin = erts_bin_nrml_alloc(size);
+ if (!bin) return NULL;
+ erts_refc_init(&bin->intern.refc, 1);
+ sys_memcpy(bin->orig_bytes, source, size);
+ *iov_base = bin->orig_bytes;
+ return (ErtsIOQBinary *)bin;
+ }
+}
+
+Uint erts_ioq_size(ErtsIOQueue *q)
+{
+ return q->size;
+}
+
+/* expand queue to hold n elements in tail or head */
+static int expandq(ErtsIOQueue* q, int n, int tail)
+/* tail: 0 to make room in the head, otherwise make room in the tail */
+{
+ int h_sz; /* room before header */
+ int t_sz; /* room after tail */
+ int q_sz; /* occupied */
+ int nvsz;
+ SysIOVec* niov;
+ ErtsIOQBinary** nbinv;
+
+ h_sz = q->v_head - q->v_start;
+ t_sz = q->v_end - q->v_tail;
+ q_sz = q->v_tail - q->v_head;
+
+ if (tail && (n <= t_sz)) /* do we need to expand tail? */
+ return 0;
+ else if (!tail && (n <= h_sz)) /* do we need to expand head? */
+ return 0;
+ else if (n > (h_sz + t_sz)) { /* need to allocate */
+        /* we may get a little extra, but that is ok */
+ nvsz = (q->v_end - q->v_start) + n;
+
+ niov = erts_alloc_fnf(q->alct, nvsz * sizeof(SysIOVec));
+ if (!niov)
+ return -1;
+ nbinv = erts_alloc_fnf(q->alct, nvsz * sizeof(ErtsIOQBinary**));
+ if (!nbinv) {
+ erts_free(q->alct, (void *) niov);
+ return -1;
+ }
+ if (tail) {
+ sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+
+ sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else {
+ sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(q->alct, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail - q_sz;
+
+ sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(q->alct, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail - q_sz;
+ }
+ }
+ else if (tail) { /* move to beginning to make room in tail */
+ sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+ sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else { /* move to end to make room */
+ sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail-q_sz;
+ sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErtsIOQBinary*));
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail-q_sz;
+ }
+
+ return 0;
+}
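/* Editorial sketch of the layout expandq() maintains:
 *
 *   v_start ...... v_head ...... v_tail ...... v_end
 *   |<-- h_sz -->|<-- q_sz -->|<-- t_sz -->|
 *
 * A head push needs h_sz >= n and a tail enqueue needs t_sz >= n;
 * otherwise the q_sz live entries are shifted toward one end, or
 * both arrays are reallocated to (old capacity + n) slots. */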
+
+static
+int skip(ErtsIOVec* vec, Uint skipbytes,
+ SysIOVec **iovp, ErtsIOQBinary ***binvp,
+ Uint *lenp)
+{
+ int n;
+ Uint len;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+
+ if (vec->common.size <= skipbytes)
+ return -1;
+
+ iov = vec->common.iov;
+ binv = vec->common.binv;
+ n = vec->common.vsize;
+    /* we use a do-while loop here to strip iov_len==0 entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skipbytes) {
+ skipbytes -= len;
+ iov++;
+ binv++;
+ n--;
+ }
+ else {
+ iov->iov_base = ((char *)(iov->iov_base)) + skipbytes;
+ iov->iov_len -= skipbytes;
+ skipbytes = 0;
+ }
+ } while(skipbytes > 0);
+
+ *binvp = binv;
+ *iovp = iov;
+ *lenp = len;
+
+ return n;
+}
+
+/* Put elements from vec at q tail */
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *eiov, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = eiov->common.size - skipbytes;
+ SysIOVec *iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(eiov->common.size >= skipbytes);
+ if (eiov->common.size <= skipbytes)
+ return 0;
+
+ n = skip(eiov, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_tail + n >= q->v_end)
+ if (expandq(q, n, 1))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+        if ((b = *binv) == NULL) { /* special case: create a binary */
+ b = alloc_binary(len, iov->iov_base, (void**)&q->v_tail->iov_base,
+ q->driver);
+ if (!b) return -1;
+ *q->b_tail++ = b;
+ q->v_tail->iov_len = len;
+ q->v_tail++;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *q->b_tail++ = b;
+ *q->v_tail++ = *iov;
+ }
+ }
+ iov++;
+ binv++;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
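/* Editorial note as code: erts_ioq_enqv() is zero-copy for entries
 * that already carry a binary (only a refcount bump), while entries
 * whose binv slot is NULL are copied into a fresh binary by
 * alloc_binary(), so the queue always owns what it points at. */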
+
+/* Put elements from vec at q head */
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec* vec, Uint skipbytes)
+{
+ int n;
+ Uint len;
+ Uint size = vec->common.size - skipbytes;
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+ ErtsIOQBinary* b;
+
+ if (q == NULL)
+ return -1;
+
+ ASSERT(vec->common.size >= skipbytes);
+ if (vec->common.size <= skipbytes)
+ return 0;
+
+ n = skip(vec, skipbytes, &iov, &binv, &len);
+
+ if (n < 0)
+ return n;
+
+ if (q->v_head - n < q->v_start)
+ if (expandq(q, n, 0))
+ return -1;
+
+ /* Queue and reference all binaries (remove zero length items) */
+ iov += (n-1); /* move to end */
+ binv += (n-1); /* move to end */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+            if ((b = *binv) == NULL) { /* special case: create a binary */
+ if (q->driver) {
+ ErlDrvBinary *bin = driver_alloc_binary(len);
+ if (!bin) return -1;
+ sys_memcpy(bin->orig_bytes, iov->iov_base, len);
+ b = (ErtsIOQBinary *)bin;
+ q->v_head->iov_base = bin->orig_bytes;
+ }
+ *--q->b_head = b;
+ q->v_head--;
+ q->v_head->iov_len = len;
+ }
+ else {
+ if (q->driver)
+ driver_binary_inc_refc(&b->driver);
+ else
+ erts_refc_inc(&b->nif.intern.refc, 1);
+ *--q->b_head = b;
+ *--q->v_head = *iov;
+ }
+ }
+ iov--;
+ binv--;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+
+/*
+** Remove size bytes from queue head
+** Return 0 on success, -1 if size exceeds the bytes in the queue
+*/
+int erts_ioq_deq(ErtsIOQueue *q, Uint size)
+{
+ Uint len;
+
+ if ((q == NULL) || (q->size < size))
+ return -1;
+ q->size -= size;
+ while (size > 0) {
+ ASSERT(q->v_head != q->v_tail);
+
+ len = q->v_head->iov_len;
+ if (len <= size) {
+ size -= len;
+ free_binary(*q->b_head, q->driver);
+ *q->b_head++ = NULL;
+ q->v_head++;
+ }
+ else {
+ q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
+ q->v_head->iov_len -= size;
+ size = 0;
+ }
+ }
+
+ /* restart pointers (optimised for enq) */
+ if (q->v_head == q->v_tail) {
+ q->v_head = q->v_tail = q->v_start;
+ q->b_head = q->b_tail = q->b_start;
+ }
+ return 0;
+}
+
+
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev) {
+ ASSERT(ev);
+
+ if (! q) {
+ return (Uint) -1;
+ } else {
+ if ((ev->common.vsize = q->v_tail - q->v_head) == 0) {
+ ev->common.size = 0;
+ ev->common.iov = NULL;
+ ev->common.binv = NULL;
+ } else {
+ ev->common.size = q->size;
+ ev->common.iov = q->v_head;
+ ev->common.binv = q->b_head;
+ }
+ return q->size;
+ }
+}
+
+SysIOVec* erts_ioq_peekq(ErtsIOQueue *q, int* vlenp) /* length of io-vector */
+{
+
+ if (q == NULL) {
+ *vlenp = -1;
+ return NULL;
+ }
+ if ((*vlenp = (q->v_tail - q->v_head)) == 0)
+ return NULL;
+ return q->v_head;
+}
+
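+/* Minimal usage sketch (illustrative only; error handling elided, and the
+ * queue q, the filled-in vector ev and the file descriptor fd are assumed
+ * to exist):
+ *
+ *   erts_ioq_enqv(q, ev, 0);              // append everything to the queue
+ *
+ *   int vlen;
+ *   SysIOVec *iov = erts_ioq_peekq(q, &vlen);
+ *   ssize_t written = writev(fd, iov, vlen);
+ *   if (written > 0)
+ *       erts_ioq_deq(q, written);         // drop what the OS accepted
+ */
+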
+/* Fills a possibly deep list of chars and binaries into iov.
+** Small characters (and binaries below bin_limit) are first stored in
+** the buffer of cbin; larger binaries are referenced and linked into binv.
+** Returns the vector length on success,
+** -1 on overflow
+** -2 on type error
+*/
+
+static ERTS_INLINE void
+io_list_to_vec_set_vec(SysIOVec **iov, ErtsIOQBinary ***binv,
+ ErtsIOQBinary *bin, byte *ptr, Uint len,
+ int *vlen)
+{
+ while (len > MAX_SYSIOVEC_IOVLEN) {
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
+ ptr += MAX_SYSIOVEC_IOVLEN;
+ len -= MAX_SYSIOVEC_IOVLEN;
+ (*iov)++;
+ (*vlen)++;
+ *(*binv)++ = bin;
+ }
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = len;
+ *(*binv)++ = bin;
+ (*iov)++;
+ (*vlen)++;
+}
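+
+/* E.g. with a 32-bit iov_len, a single 5 GB refc binary is emitted as three
+ * consecutive iov entries of 2 GB, 2 GB and 1 GB, each pointing into the
+ * same underlying buffer and each with the same ErtsIOQBinary in binv. */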
+
+int
+erts_ioq_iolist_to_vec(Eterm obj, /* io-list */
+ SysIOVec* iov, /* io vector */
+ ErtsIOQBinary** binv, /* binary reference vector */
+ ErtsIOQBinary* cbin, /* binary to store characters */
+ Uint bin_limit, /* small binaries limit */
+ int driver)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ byte *buf = NULL;
+ Uint len = 0;
+ Uint csize = 0;
+ int vlen = 0;
+ byte* cptr;
+
+ if (cbin) {
+ if (driver) {
+ buf = (byte*)cbin->driver.orig_bytes;
+ len = cbin->driver.orig_size;
+ } else {
+ buf = (byte*)cbin->nif.orig_bytes;
+ len = cbin->nif.orig_size;
+ }
+ }
+ cptr = buf;
+
+ goto L_jump_start; /* avoid push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0)
+ goto L_overflow;
+ *buf++ = unsigned_val(obj);
+ csize++;
+ len--;
+ } else if (is_binary(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto handle_binary;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) {
+ goto handle_binary;
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ Eterm real_bin;
+ Uint offset;
+ Eterm* bptr;
+ Uint size;
+ int bitoffs;
+ int bitsize;
+
+ handle_binary:
+ size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ ASSERT(bitsize == 0);
+ bptr = binary_val(real_bin);
+ if (*bptr == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin *) bptr;
+ if (bitoffs != 0) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ erts_copy_bits(pb->bytes+offset, bitoffs, 1,
+ (byte *) buf, 0, 1, size*8);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else if (bin_limit && size < bin_limit) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ sys_memcpy(buf, pb->bytes+offset, size);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else {
+ ErtsIOQBinary *qbin;
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin,
+ cptr, csize, &vlen);
+ cptr = buf;
+ csize = 0;
+ }
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ if (driver)
+ qbin = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ qbin = (ErtsIOQBinary*)pb->val;
+
+ io_list_to_vec_set_vec(
+ &iov, &binv, qbin,
+ pb->bytes+offset, size, &vlen);
+ }
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *) bptr;
+ if (len < size) {
+ goto L_overflow;
+ }
+ copy_binary_to_buffer(buf, 0,
+ ((byte *) hb->data)+offset, bitoffs,
+ 8*size);
+ csize += size;
+ buf += size;
+ len -= size;
+ }
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ if (csize != 0) {
+ io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
+ }
+
+ DESTROY_ESTACK(s);
+ return vlen;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -2;
+
+ L_overflow:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
+static ERTS_INLINE int
+io_list_vec_count(Eterm obj, Uint *v_size,
+ Uint *c_size, Uint *b_size, Uint *in_clist,
+ Uint *p_v_size, Uint *p_c_size, Uint *p_in_clist,
+ Uint blimit)
+{
+ Uint size = binary_size(obj);
+ Eterm real;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ int bitoffs;
+ int bitsize;
+ ERTS_GET_REAL_BIN(obj, real, offset, bitoffs, bitsize);
+ if (bitsize != 0) return 1;
+ if (thing_subtag(*binary_val(real)) == REFC_BINARY_SUBTAG &&
+ bitoffs == 0) {
+ *b_size += size;
+ if (*b_size < size) return 2;
+ *in_clist = 0;
+ ++*v_size;
+        /* If iov_len is smaller than Uint we split the binary into */
+        /* multiple smaller (2GB) elements in the iolist. */
+ *v_size += size / MAX_SYSIOVEC_IOVLEN;
+ if (size >= blimit) {
+ *p_in_clist = 0;
+ ++*p_v_size;
+ } else {
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ } else {
+ *c_size += size;
+ if (*c_size < size) return 2;
+ if (!*in_clist) {
+ *in_clist = 1;
+ ++*v_size;
+ }
+ *p_c_size += size;
+ if (!*p_in_clist) {
+ *p_in_clist = 1;
+ ++*p_v_size;
+ }
+ }
+ return 0;
+}
+
+#define IO_LIST_VEC_COUNT(obj) \
+ do { \
+ switch (io_list_vec_count(obj, &v_size, &c_size, \
+ &b_size, &in_clist, \
+ &p_v_size, &p_c_size, &p_in_clist, \
+ blimit)) { \
+ case 1: goto L_type_error; \
+ case 2: goto L_overflow_error; \
+ default: break; \
+ } \
+ } while(0)
+
+/*
+ * Returns 0 if successful and a non-zero value otherwise.
+ *
+ * Return values through pointers:
+ * *vsize - SysIOVec size needed for a writev
+ * *csize - Number of bytes not in binary (in the common binary)
+ * *pvsize - SysIOVec size needed if packing small binaries
+ * *pcsize - Number of bytes in the common binary if packing
+ * *total_size - Total size of iolist in bytes
+ */
+int
+erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ Uint v_size = 0;
+ Uint c_size = 0;
+ Uint b_size = 0;
+ Uint in_clist = 0;
+ Uint p_v_size = 0;
+ Uint p_c_size = 0;
+ Uint p_in_clist = 0;
+ size_t total;
+
+ goto L_jump_start; /* avoid a push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+
+ if (is_byte(obj)) {
+ c_size++;
+ if (c_size == 0) {
+ goto L_overflow_error;
+ }
+ if (!in_clist) {
+ in_clist = 1;
+ v_size++;
+ }
+ p_c_size++;
+ if (!p_in_clist) {
+ p_in_clist = 1;
+ p_v_size++;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) { /* binary tail is OK */
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ total = c_size + b_size;
+ if (total < c_size) {
+ goto L_overflow_error;
+ }
+ *total_size = total;
+
+ DESTROY_ESTACK(s);
+ *vsize = v_size;
+ *csize = c_size;
+ *pvsize = p_v_size;
+ *pcsize = p_c_size;
+ return 0;
+
+ L_type_error:
+ L_overflow_error:
+ DESTROY_ESTACK(s);
+ return 1;
+}
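+
+/* Typical two-phase use (a sketch only; allocating iov/binv/cbin is the
+ * caller's responsibility and is abbreviated here):
+ *
+ *   int vsize;
+ *   Uint csize, pvsize, pcsize;
+ *   size_t total;
+ *   if (erts_ioq_iolist_vec_len(list, &vsize, &csize,
+ *                               &pvsize, &pcsize, &total, blimit))
+ *       return badarg;  // bad iolist or size overflow
+ *   // allocate vsize SysIOVec slots, vsize binv slots and a csize-byte
+ *   // cbin, then flatten:
+ *   vlen = erts_ioq_iolist_to_vec(list, iov, binv, cbin, blimit, driver);
+ */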
+
+typedef struct {
+ Eterm result_head;
+ Eterm result_tail;
+ Eterm input_list;
+
+ UWord acc_size;
+ Binary *acc;
+
+    /* We yield after copying this many bytes into the accumulator (minus
+     * a few spent on consing etc). Large binaries only count to the
+     * extent that splitting them (if at all) resulted in a copy op. */
+ UWord bytereds_available;
+ UWord bytereds_spent;
+
+ Process *process;
+ ErtsEStack estack;
+
+ Eterm magic_reference;
+} iol2v_state_t;
+
+static int iol2v_state_destructor(Binary *data) {
+ iol2v_state_t *state = ERTS_MAGIC_BIN_UNALIGNED_DATA(data);
+
+ DESTROY_SAVED_ESTACK(&state->estack);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ }
+
+ return 1;
+}
+
+static void iol2v_init(iol2v_state_t *state, Process *process, Eterm input) {
+ state->process = process;
+
+ state->result_head = NIL;
+ state->result_tail = NIL;
+ state->input_list = input;
+
+ state->magic_reference = NIL;
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+}
+
+static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term,
+ UWord offset, UWord size) {
+ Uint byte_offset, bit_offset, bit_size;
+ ErlSubBin *sb;
+ Eterm orig_pb_term;
+
+ sb = (ErlSubBin*)HAlloc(state->process, ERL_SUB_BIN_SIZE);
+
+ ERTS_GET_REAL_BIN(bin_term, orig_pb_term,
+ byte_offset, bit_offset, bit_size);
+
+ ASSERT(bit_size == 0);
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->bitoffs = bit_offset;
+ sb->bitsize = 0;
+ sb->orig = orig_pb_term;
+ sb->is_writable = 0;
+
+ sb->offs = byte_offset + offset;
+ sb->size = size;
+
+ return make_binary(sb);
+}
+
+static Eterm iol2v_promote_acc(iol2v_state_t *state) {
+ Eterm bin;
+
+ bin = erts_build_proc_bin(&MSO(state->process),
+ HAlloc(state->process, PROC_BIN_SIZE),
+ erts_bin_realloc(state->acc, state->acc_size));
+ state->acc_size = 0;
+ state->acc = NULL;
+
+ return bin;
+}
+
+/* Destructively enqueues a term to the result list, saving us the hassle of
+ * having to reverse it later. This is safe since GC is disabled and we never
+ * leak the unfinished term to the outside. */
+static void iol2v_enqueue_result(iol2v_state_t *state, Eterm term) {
+ Eterm prev_tail;
+ Eterm *hp;
+
+ prev_tail = state->result_tail;
+
+ hp = HAlloc(state->process, 2);
+ state->result_tail = CONS(hp, term, NIL);
+
+ if(prev_tail != NIL) {
+ Eterm *prev_cell = list_val(prev_tail);
+ CDR(prev_cell) = state->result_tail;
+ } else {
+ state->result_head = state->result_tail;
+ }
+
+ state->bytereds_spent += 1;
+}
+
+#ifndef DEBUG
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 32)
+#else
+ #define ACC_REALLOCATION_LIMIT (IOL2V_SMALL_BIN_LIMIT * 4)
+#endif
+
+static void iol2v_expand_acc(iol2v_state_t *state, UWord extra) {
+ UWord required_bytes, acc_alloc_size;
+
+ ERTS_CT_ASSERT(ERTS_UWORD_MAX > ACC_REALLOCATION_LIMIT / 2);
+ ASSERT(extra >= 1);
+
+ acc_alloc_size = state->acc != NULL ? (state->acc)->orig_size : 0;
+ required_bytes = state->acc_size + extra;
+
+ if (state->acc == NULL) {
+ UWord new_size = MAX(required_bytes, IOL2V_SMALL_BIN_LIMIT);
+
+ state->acc = erts_bin_nrml_alloc(new_size);
+ } else if (required_bytes > acc_alloc_size) {
+ Binary *prev_acc;
+ UWord new_size;
+
+ if (acc_alloc_size >= ACC_REALLOCATION_LIMIT) {
+ /* We skip reallocating once we hit a certain point; it often
+ * results in extra copying and we're very likely to overallocate
+ * on anything other than absurdly long byte/heapbin sequences. */
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_expand_acc(state, extra);
+ return;
+ }
+
+ new_size = MAX(required_bytes, acc_alloc_size * 2);
+ prev_acc = state->acc;
+
+ state->acc = erts_bin_realloc(prev_acc, new_size);
+
+ if (prev_acc != state->acc) {
+ state->bytereds_spent += state->acc_size;
+ }
+ }
+
+ state->bytereds_spent += extra;
+}
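+
+/* Growth sketch: assuming IOL2V_SMALL_BIN_LIMIT were 256 bytes (the actual
+ * value is defined elsewhere), the accumulator grows 256 -> 512 -> 1024 and
+ * so on, doubling until ACC_REALLOCATION_LIMIT; past that point it is
+ * flushed to the result list and restarted instead of being reallocated. */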
+
+static int iol2v_append_byte_seq(iol2v_state_t *state, Eterm seq_start, Eterm *seq_end) {
+ Eterm lookahead, iterator;
+ Uint observed_bits;
+ SWord seq_length;
+ char *acc_data;
+
+ lookahead = seq_start;
+ seq_length = 0;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ while (is_list(lookahead)) {
+ Eterm *cell = list_val(lookahead);
+
+ if (!is_small(CAR(cell))) {
+ break;
+ }
+
+ if (seq_length * 2 >= (state->bytereds_available - state->bytereds_spent)) {
+ break;
+ }
+
+ lookahead = CDR(cell);
+ seq_length += 1;
+ }
+
+ ASSERT(seq_length >= 1);
+
+ iol2v_expand_acc(state, seq_length);
+
+ /* Bump a few extra reductions to account for list traversal. */
+ state->bytereds_spent += seq_length;
+
+ acc_data = &(state->acc)->orig_bytes[state->acc_size];
+ state->acc_size += seq_length;
+
+ iterator = seq_start;
+ observed_bits = 0;
+
+ while (iterator != lookahead) {
+ Eterm *cell;
+ Uint byte;
+
+ cell = list_val(iterator);
+ iterator = CDR(cell);
+
+ byte = unsigned_val(CAR(cell));
+ observed_bits |= byte;
+
+ ASSERT(acc_data < &(state->acc)->orig_bytes[state->acc_size]);
+ *(acc_data++) = byte;
+ }
+
+ if (observed_bits > UCHAR_MAX) {
+ return 0;
+ }
+
+ ASSERT(acc_data == &(state->acc)->orig_bytes[state->acc_size]);
+ *seq_end = iterator;
+
+ return 1;
+}
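+
+/* Note on validation: bytes are copied first and checked afterwards by
+ * OR-ing them into observed_bits. E.g. [1000 | Rest] still writes the
+ * truncated low byte into the accumulator, but observed_bits > UCHAR_MAX
+ * makes the call fail, so the bogus accumulator contents are never
+ * exposed to the caller. */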
+
+static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
+ int is_acc_small, is_bin_small;
+ UWord combined_size;
+ UWord binary_size;
+
+ Uint byte_offset, bit_offset, bit_size;
+ byte *binary_data;
+
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ ASSERT(state->bytereds_available > state->bytereds_spent);
+
+ ERTS_GET_REAL_BIN(bin_term, parent_binary, byte_offset, bit_offset, bit_size);
+ parent_header = binary_val(parent_binary);
+ binary_size = binary_size(bin_term);
+
+ if (bit_size != 0) {
+ return 0;
+ } else if (binary_size == 0) {
+ state->bytereds_spent += 1;
+ return 1;
+ }
+
+ is_acc_small = state->acc_size < IOL2V_SMALL_BIN_LIMIT;
+ is_bin_small = binary_size < IOL2V_SMALL_BIN_LIMIT;
+ combined_size = binary_size + state->acc_size;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+
+ binary_data = &((byte*)pb->bytes)[byte_offset];
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+ ASSERT(is_bin_small);
+
+ binary_data = &((byte*)&hb->data)[byte_offset];
+ }
+
+ if (!is_bin_small && (state->acc_size == 0 || !is_acc_small)) {
+ /* Avoid combining if we encounter an acceptably large binary while the
+ * accumulator is either empty or large enough to be returned on its
+ * own. */
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ iol2v_enqueue_result(state, bin_term);
+ } else if (is_bin_small || combined_size < (IOL2V_SMALL_BIN_LIMIT * 2)) {
+ /* If the candidate is small or we can't split the combination in two,
+ * then just copy it into the accumulator. */
+ iol2v_expand_acc(state, binary_size);
+
+ if (ERTS_LIKELY(bit_offset == 0)) {
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, binary_size);
+ } else {
+ ASSERT(binary_size <= ERTS_UWORD_MAX / 8);
+
+ erts_copy_bits(binary_data, bit_offset, 1,
+ (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1,
+ binary_size * 8);
+ }
+
+ state->acc_size += binary_size;
+ } else {
+        /* Otherwise, append just enough data to bring the accumulator up to
+         * the small-binary limit, and return the rest as a sub-binary. */
+ UWord spill = IOL2V_SMALL_BIN_LIMIT - state->acc_size;
+ Eterm binary_tail;
+
+ iol2v_expand_acc(state, spill);
+
+ if (ERTS_LIKELY(bit_offset == 0)) {
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, spill);
+ } else {
+ ASSERT(binary_size <= ERTS_UWORD_MAX / 8);
+
+ erts_copy_bits(binary_data, bit_offset, 1,
+ (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1,
+ spill * 8);
+ }
+
+ state->acc_size += spill;
+
+ binary_tail = iol2v_make_sub_bin(state, bin_term, spill,
+ binary_size - spill);
+
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ iol2v_enqueue_result(state, binary_tail);
+ }
+
+ return 1;
+}
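+
+/* In short (illustrative sizes, assuming a small-bin limit of 256 bytes):
+ * a 100-byte heap binary is copied into the accumulator; a 1 MB refc binary
+ * flushes the accumulator (if non-empty) and is emitted as-is; a large
+ * binary arriving while a small non-empty accumulator is pending is split,
+ * with `spill` bytes topping the accumulator up to the limit and the
+ * remainder emitted as a sub-binary. */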
+
+static BIF_RETTYPE iol2v_yield(iol2v_state_t *state) {
+ if (is_nil(state->magic_reference)) {
+ iol2v_state_t *boxed_state;
+ Binary *magic_binary;
+ Eterm *hp;
+
+ magic_binary = erts_create_magic_binary_x(sizeof(*state),
+ &iol2v_state_destructor, ERTS_ALC_T_BINARY, 1);
+
+ boxed_state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic_binary);
+ sys_memcpy(boxed_state, state, sizeof(*state));
+
+ hp = HAlloc(boxed_state->process, ERTS_MAGIC_REF_THING_SIZE);
+ boxed_state->magic_reference =
+ erts_mk_magic_ref(&hp, &MSO(boxed_state->process), magic_binary);
+
+ state = boxed_state;
+ }
+
+ ERTS_BIF_YIELD1(bif_export[BIF_iolist_to_iovec_1],
+ state->process, state->magic_reference);
+}
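+
+/* Yield round trip: on the first yield the state is copied into a magic
+ * binary and a magic reference is created; the BIF then traps to itself
+ * with that reference as its sole argument. On re-entry,
+ * iolist_to_iovec_1 below sees an internal magic ref, unboxes the saved
+ * state and resumes via iol2v_continue(). */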
+
+static BIF_RETTYPE iol2v_continue(iol2v_state_t *state) {
+ Eterm iterator;
+
+ DECLARE_ESTACK(s);
+ ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+
+ state->bytereds_available =
+ ERTS_BIF_REDS_LEFT(state->process) * IOL2V_SMALL_BIN_LIMIT;
+ state->bytereds_spent = 0;
+
+ if (state->estack.start) {
+ ESTACK_RESTORE(s, &state->estack);
+ }
+
+ iterator = state->input_list;
+
+ for(;;) {
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+
+ while (is_list(iterator)) {
+ Eterm *cell;
+ Eterm head;
+
+ cell = list_val(iterator);
+ head = CAR(cell);
+
+ if (is_binary(head)) {
+ if (!iol2v_append_binary(state, head)) {
+ goto l_badarg;
+ }
+
+ iterator = CDR(cell);
+ } else if (is_small(head)) {
+ Eterm seq_end;
+
+ if (!iol2v_append_byte_seq(state, iterator, &seq_end)) {
+ goto l_badarg;
+ }
+
+ iterator = seq_end;
+ } else if (is_list(head) || is_nil(head)) {
+ Eterm tail = CDR(cell);
+
+ if (!is_nil(tail)) {
+ ESTACK_PUSH(s, tail);
+ }
+
+ state->bytereds_spent += 1;
+ iterator = head;
+ } else {
+ goto l_badarg;
+ }
+
+ if (state->bytereds_spent >= state->bytereds_available) {
+ ESTACK_SAVE(s, &state->estack);
+ state->input_list = iterator;
+
+ return iol2v_yield(state);
+ }
+ }
+
+ if (is_binary(iterator)) {
+ if (!iol2v_append_binary(state, iterator)) {
+ goto l_badarg;
+ }
+ } else if (!is_nil(iterator)) {
+ goto l_badarg;
+ }
+
+ if(ESTACK_ISEMPTY(s)) {
+ break;
+ }
+
+ iterator = ESTACK_POP(s);
+ }
+
+ if (state->acc_size != 0) {
+ iol2v_enqueue_result(state, iol2v_promote_acc(state));
+ }
+
+ BUMP_REDS(state->process, state->bytereds_spent / IOL2V_SMALL_BIN_LIMIT);
+
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ BIF_RET(state->result_head);
+
+l_badarg:
+ CLEAR_SAVED_ESTACK(&state->estack);
+ DESTROY_ESTACK(s);
+
+ if (state->acc != NULL) {
+ erts_bin_free(state->acc);
+ state->acc = NULL;
+ }
+
+ BIF_ERROR(state->process, BADARG);
+}
+
+HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_iovec, 1)
+
+BIF_RETTYPE iolist_to_iovec_1(BIF_ALIST_1) {
+ BIF_RETTYPE result;
+
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(NIL);
+ } else if (is_binary(BIF_ARG_1)) {
+ if (binary_bitsize(BIF_ARG_1) != 0) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ } else if (binary_size(BIF_ARG_1) != 0) {
+ Eterm *hp = HAlloc(BIF_P, 2);
+
+ BIF_RET(CONS(hp, BIF_ARG_1, NIL));
+ } else {
+ BIF_RET(NIL);
+ }
+ } else if (is_internal_magic_ref(BIF_ARG_1)) {
+ iol2v_state_t *state;
+ Binary *magic;
+
+ magic = erts_magic_ref2bin(BIF_ARG_1);
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != &iol2v_state_destructor) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ASSERT(BIF_P->flags & F_DISABLE_GC);
+
+ state = ERTS_MAGIC_BIN_UNALIGNED_DATA(magic);
+ result = iol2v_continue(state);
+ } else if (!is_list(BIF_ARG_1)) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ iol2v_state_t state;
+
+ iol2v_init(&state, BIF_P, BIF_ARG_1);
+
+ erts_set_gc_state(BIF_P, 0);
+
+ result = iol2v_continue(&state);
+ }
+
+ if (result != THE_NON_VALUE || BIF_P->freason != TRAP) {
+ erts_set_gc_state(BIF_P, 1);
+ }
+
+ BIF_RET(result);
+}
diff --git a/erts/emulator/beam/erl_io_queue.h b/erts/emulator/beam/erl_io_queue.h
new file mode 100644
index 0000000000..7d0fe6751c
--- /dev/null
+++ b/erts/emulator/beam/erl_io_queue.h
@@ -0,0 +1,201 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A queue used for storing binary data that should be
+ * passed to writev or similar functions. Used by both
+ *              the NIF and driver APIs.
+ *
+ * Author: Lukas Larsson
+ */
+
+#ifndef ERL_IO_QUEUE_H__TYPES__
+#define ERL_IO_QUEUE_H__TYPES__
+
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+#include "erl_nif.h"
+
+#ifdef DEBUG
+#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
+#else
+#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
+#endif
+
+#define ERTS_SMALL_IO_QUEUE 5
+
+typedef union {
+ ErlDrvBinary driver;
+ Binary nif;
+} ErtsIOQBinary;
+
+typedef struct {
+ int vsize; /* length of vectors */
+ Uint size; /* total size in bytes */
+ SysIOVec* iov;
+ ErtsIOQBinary** binv;
+} ErtsIOVecCommon;
+
+typedef union {
+ ErtsIOVecCommon common;
+ ErlIOVec driver;
+ ErlNifIOVec nif;
+} ErtsIOVec;
+
+/* head/tail represent the data in the queue
+ * start/end represent the edges of the allocated queue
+ * v_small/b_small are used when the number of iovec elements is < ERTS_SMALL_IO_QUEUE
+ */
+typedef struct erts_io_queue {
+ ErtsAlcType_t alct;
+ int driver;
+ Uint size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[ERTS_SMALL_IO_QUEUE];
+
+ ErtsIOQBinary **b_start;
+ ErtsIOQBinary **b_end;
+ ErtsIOQBinary **b_head;
+ ErtsIOQBinary **b_tail;
+ ErtsIOQBinary *b_small[ERTS_SMALL_IO_QUEUE];
+
+} ErtsIOQueue;
+
+#endif /* ERL_IO_QUEUE_H__TYPES__ */
+
+#if !defined(ERL_IO_QUEUE_H) && !defined(ERTS_IO_QUEUE_TYPES_ONLY__)
+#define ERL_IO_QUEUE_H
+
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+void erts_ioq_init(ErtsIOQueue *q, ErtsAlcType_t alct, int driver);
+void erts_ioq_clear(ErtsIOQueue *q);
+Uint erts_ioq_size(ErtsIOQueue *q);
+int erts_ioq_enqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_pushqv(ErtsIOQueue *q, ErtsIOVec *vec, Uint skip);
+int erts_ioq_deq(ErtsIOQueue *q, Uint size);
+Uint erts_ioq_peekqv(ErtsIOQueue *q, ErtsIOVec *ev);
+SysIOVec *erts_ioq_peekq(ErtsIOQueue *q, int *vlenp);
+Uint erts_ioq_sizeq(ErtsIOQueue *q);
+
+int erts_ioq_iolist_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit);
+int erts_ioq_iolist_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit);
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj, SysIOVec* iov,
+ ErtsIOQBinary** binv, ErtsIOQBinary* cbin,
+ Uint bin_limit, int driver_binary);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ size_t* total_size, Uint blimit) {
+ if (is_binary(obj)) {
+ /* We optimize for when we get a procbin without a bit-offset
+ * that fits in one iov slot
+ */
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ *vsize = 1;
+ *pvsize = 1;
+ if (thing_subtag(*binary_val(real_bin)) == REFC_BINARY_SUBTAG) {
+ *csize = 0;
+ *pcsize = 0;
+ } else {
+ *csize = size;
+ *pcsize = size;
+ }
+ *total_size = size;
+ return 0;
+ }
+ }
+
+ return erts_ioq_iolist_vec_len(obj, vsize, csize,
+ pvsize, pcsize, total_size, blimit);
+}
+
+ERTS_GLB_INLINE
+int erts_ioq_iodata_to_vec(Eterm obj,
+ SysIOVec *iov,
+ ErtsIOQBinary **binv,
+ ErtsIOQBinary *cbin,
+ Uint bin_limit,
+ int driver)
+{
+ if (is_binary(obj)) {
+ Eterm real_bin;
+ byte bitoffs;
+ byte bitsize;
+ Uint offset;
+ Uint size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if (size < MAX_SYSIOVEC_IOVLEN && bitoffs == 0 && bitsize == 0) {
+ Eterm *bptr = binary_val(real_bin);
+ if (thing_subtag(*bptr) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin *)bptr;
+ if (pb->flags)
+ erts_emasculate_writable_binary(pb);
+ iov[0].iov_base = pb->bytes+offset;
+ iov[0].iov_len = size;
+ if (driver)
+ binv[0] = (ErtsIOQBinary*)Binary2ErlDrvBinary(pb->val);
+ else
+ binv[0] = (ErtsIOQBinary*)pb->val;
+ return 1;
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *)bptr;
+ byte *buf = driver ? (byte*)cbin->driver.orig_bytes :
+ (byte*)cbin->nif.orig_bytes;
+ copy_binary_to_buffer(buf, 0, ((byte *) hb->data)+offset, 0, 8*size);
+ iov[0].iov_base = buf;
+ iov[0].iov_len = size;
+ binv[0] = cbin;
+ return 1;
+ }
+ }
+ }
+ return erts_ioq_iolist_to_vec(obj, iov, binv, cbin, bin_limit, driver);
+}
+
+#endif
+
+#endif /* ERL_IO_QUEUE_H */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..1416c5f96c 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -41,6 +41,7 @@
#include "erl_lock_check.h"
#include "erl_term.h"
#include "erl_threads.h"
+#include "erl_atom_table.h"
typedef struct {
char *name;
@@ -75,13 +76,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
* if only one lock use
* the lock name)"
*/
-#ifdef ERTS_SMP
+ { "NO LOCK", NULL },
{ "driver_lock", "driver_name" },
{ "port_lock", "port_id" },
-#endif
{ "port_data_lock", "address" },
-#ifdef ERTS_SMP
- { "bif_timers", NULL },
{ "reg_tab", NULL },
{ "proc_main", "pid" },
{ "old_code", "address" },
@@ -89,47 +87,39 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "hipe_mfait_lock", NULL },
#endif
{ "nodes_monitors", NULL },
+ { "meta_name_tab", "address" },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
+ { "db_hash_slot", "address" },
+ { "resource_monitors", "address" },
{ "driver_list", NULL },
- { "proc_link", "pid" },
{ "proc_msgq", "pid" },
{ "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "update_persistent_term_permission", NULL },
+ { "persistent_term_delete_permission", NULL },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
{ "proc_status", "pid" },
{ "proc_trace", "pid" },
- { "ports_snapshot", NULL },
- { "meta_name_tab", "address" },
- { "meta_main_tab_slot", "address" },
- { "db_tab", "address" },
- { "db_tab_fix", "address" },
- { "meta_main_tab_main", NULL },
- { "db_hash_slot", "address" },
{ "node_table", NULL },
{ "dist_table", NULL },
{ "sys_tracers", NULL },
- { "module_tab", NULL },
{ "export_tab", NULL },
{ "fun_tab", NULL },
{ "environ", NULL },
-#ifdef ERTS_NEW_PURGE_STRATEGY
{ "release_literal_areas", NULL },
-#endif
-#endif
- { "efile_drv", "address" },
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
- { "pollset_rm_list", NULL },
- { "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
-#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
-#endif
+ { "dirty_gc_info", NULL },
+ { "dirty_break_point_index", NULL },
{ "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
@@ -144,51 +134,31 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "async_enq_mtx", NULL },
{ "msacc_list_mutex", NULL },
{ "msacc_unmanaged_mutex", NULL },
-#ifdef ERTS_SMP
{ "atom_tab", NULL },
- { "misc_op_list_pre_alloc_lock", "address" },
- { "message_pre_alloc_lock", "address" },
- { "ptimer_pre_alloc_lock", "address", },
- { "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
{ "sys_msg_q", NULL },
{ "tracer_mtx", NULL },
{ "port_table", NULL },
-#endif
+ { "magic_ref_table", "address" },
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
{ "alcu_allocator", "index" },
{ "mseg", NULL },
-#ifdef ERTS_SMP
- { "port_task_pre_alloc_lock", "address" },
- { "proclist_pre_alloc_lock", "address" },
- { "xports_list_pre_alloc_lock", "address" },
- { "inet_buffer_stack_lock", NULL },
- { "system_block", NULL },
- { "timeofday", NULL },
{ "get_time", NULL },
{ "get_corrected_time", NULL },
- { "breakpoints", NULL },
- { "pollsets_lock", NULL },
+ { "runtime", NULL },
{ "pix_lock", "address" },
- { "run_queues_lists", NULL },
{ "sched_stat", NULL },
-#endif
{ "async_init_mtx", NULL },
#ifdef __WIN32__
#ifdef DEBUG
{ "save_ops_lock", NULL },
#endif
#endif
-#ifdef USE_VM_PROBES
- { "efile_drv dtrace mutex", NULL },
-#endif
{ "mtrace_buf", NULL },
-#ifdef ERTS_SMP
{ "os_monotonic_time", NULL },
-#endif
{ "erts_alloc_hard_debug", NULL },
{ "hard_dbg_mseg", NULL },
{ "erts_mmap", NULL }
@@ -197,257 +167,253 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#define ERTS_LOCK_ORDER_SIZE \
(sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
-#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
- (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
- && ((LCK_FLG) \
- & ERTS_LC_FLG_LT_ALL \
- & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_SPINLOCK \
+ && \
+ ((LCK_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) != ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
static __decl_noreturn void __noreturn lc_abort(void);
-static char *
-lock_type(Uint16 flags)
+static const char *rw_op_str(erts_lock_options_t options)
{
- switch (flags & ERTS_LC_FLG_LT_ALL) {
- case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
- case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
- case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
- case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
- case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
- default: return "";
+ if(options == ERTS_LOCK_OPTIONS_WRITE) {
+ ERTS_INTERNAL_ERROR("Only write flag present");
}
-}
-static char *
-rw_op_str(Uint16 flags)
-{
- switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
- case ERTS_LC_FLG_LO_READ_WRITE:
- return " (rw)";
- case ERTS_LC_FLG_LO_READ:
- return " (r)";
- case ERTS_LC_FLG_LO_WRITE:
- ERTS_INTERNAL_ERROR("Only write flag present");
- default:
- break;
- }
- return "";
+ return erts_lock_options_get_short_desc(options);
}
-typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
-struct erts_lc_locked_lock_t_ {
- erts_lc_locked_lock_t *next;
- erts_lc_locked_lock_t *prev;
+typedef struct lc_locked_lock_t_ lc_locked_lock_t;
+struct lc_locked_lock_t_ {
+ lc_locked_lock_t *next;
+ lc_locked_lock_t *prev;
UWord extra;
Sint16 id;
char *file;
unsigned int line;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
};
typedef struct {
- erts_lc_locked_lock_t *first;
- erts_lc_locked_lock_t *last;
-} erts_lc_locked_lock_list_t;
+ lc_locked_lock_t *first;
+ lc_locked_lock_t *last;
+} lc_locked_lock_list_t;
+
+typedef union lc_free_block_t_ lc_free_block_t;
+union lc_free_block_t_ {
+ lc_free_block_t *next;
+ lc_locked_lock_t lock;
+};
+
+typedef struct {
+ /*
+ * m[X][Y] & 1 if we locked X directly after Y was locked.
+ * m[X][Y] & 2 if we locked X indirectly after Y was locked.
+ * m[X][0] = 1 if we locked X when nothing else was locked.
+ * m[0][] is unused as it would represent locking "NO LOCK"
+ */
+ char m[ERTS_LOCK_ORDER_SIZE][ERTS_LOCK_ORDER_SIZE];
+
+} lc_matrix_t;
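+
+/* Reading the encoding above: if lock X is taken while Y is the most
+ * recently taken lock still held, m[X][Y] gets bit 1; for each older lock
+ * Z still held, m[X][Z] gets bit 2; taking X with nothing held sets
+ * m[X][0] to 1. Row/column 0 ("NO LOCK") is otherwise unused. */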
+
+static lc_matrix_t tot_lc_matrix;
+
+#define ERTS_LC_FB_CHUNK_SIZE 10
+
+typedef struct lc_alloc_chunk_t_ lc_alloc_chunk_t;
+struct lc_alloc_chunk_t_ {
+ lc_alloc_chunk_t* next;
+ lc_free_block_t array[ERTS_LC_FB_CHUNK_SIZE];
+};
-typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
-struct erts_lc_locked_locks_t_ {
+typedef struct lc_thread_t_ lc_thread_t;
+struct lc_thread_t_ {
char *thread_name;
int emu_thread;
erts_tid_t tid;
- erts_lc_locked_locks_t *next;
- erts_lc_locked_locks_t *prev;
- erts_lc_locked_lock_list_t locked;
- erts_lc_locked_lock_list_t required;
-};
-
-typedef union erts_lc_free_block_t_ erts_lc_free_block_t;
-union erts_lc_free_block_t_ {
- erts_lc_free_block_t *next;
- erts_lc_locked_lock_t lock;
+ lc_thread_t *next;
+ lc_thread_t *prev;
+ lc_locked_lock_list_t locked;
+ lc_locked_lock_list_t required;
+ lc_free_block_t *free_blocks;
+ lc_alloc_chunk_t *chunks;
+ lc_matrix_t matrix;
};
static ethr_tsd_key locks_key;
-static erts_lc_locked_locks_t *erts_locked_locks = NULL;
-
-static erts_lc_free_block_t *free_blocks = NULL;
-
-#ifdef ERTS_LC_STATIC_ALLOC
-#define ERTS_LC_FB_CHUNK_SIZE 10000
-#else
-#define ERTS_LC_FB_CHUNK_SIZE 10
-#endif
-
-static ethr_spinlock_t free_blocks_lock;
+static lc_thread_t *lc_threads = NULL;
+static ethr_spinlock_t lc_threads_lock;
static ERTS_INLINE void
-lc_lock(void)
+lc_lock_threads(void)
{
- ethr_spin_lock(&free_blocks_lock);
+ ethr_spin_lock(&lc_threads_lock);
}
static ERTS_INLINE void
-lc_unlock(void)
+lc_unlock_threads(void)
{
- ethr_spin_unlock(&free_blocks_lock);
+ ethr_spin_unlock(&lc_threads_lock);
}
-static ERTS_INLINE void lc_free(void *p)
+static ERTS_INLINE void lc_free(lc_thread_t* thr, lc_locked_lock_t *p)
{
- erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p;
+ lc_free_block_t *fb = (lc_free_block_t *) p;
#ifdef DEBUG
- memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
+ sys_memset((void *) p, 0xdf, sizeof(lc_free_block_t));
#endif
- lc_lock();
- fb->next = free_blocks;
- free_blocks = fb;
- lc_unlock();
+ fb->next = thr->free_blocks;
+ thr->free_blocks = fb;
}
-#ifdef ERTS_LC_STATIC_ALLOC
-
-static void *lc_core_alloc(void)
-{
- lc_unlock();
- ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
-}
-
-#else
-
-static void *lc_core_alloc(void)
+static lc_locked_lock_t *lc_core_alloc(lc_thread_t* thr)
{
int i;
- erts_lc_free_block_t *fbs;
- lc_unlock();
- fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
- * ERTS_LC_FB_CHUNK_SIZE);
- if (!fbs) {
+ lc_alloc_chunk_t* chunk;
+ lc_free_block_t* fbs;
+ chunk = (lc_alloc_chunk_t*) malloc(sizeof(lc_alloc_chunk_t));
+ if (!chunk) {
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
+ chunk->next = thr->chunks;
+ thr->chunks = chunk;
+
+ fbs = chunk->array;
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
- memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
+ sys_memset((void *) &fbs[i], 0xdf, sizeof(lc_free_block_t));
#endif
fbs[i].next = &fbs[i+1];
}
#ifdef DEBUG
- memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
- 0xdf, sizeof(erts_lc_free_block_t));
+ sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
+ 0xdf, sizeof(lc_free_block_t));
#endif
- lc_lock();
- fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
- free_blocks = &fbs[1];
- return (void *) &fbs[0];
+ fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = thr->free_blocks;
+ thr->free_blocks = &fbs[1];
+ return &fbs[0].lock;
}
-#endif
-
-static ERTS_INLINE void *lc_alloc(void)
+static ERTS_INLINE lc_locked_lock_t *lc_alloc(lc_thread_t* thr)
{
- void *res;
- lc_lock();
- if (!free_blocks)
- res = lc_core_alloc();
+ lc_locked_lock_t *res;
+ if (!thr->free_blocks)
+ res = lc_core_alloc(thr);
else {
- res = (void *) free_blocks;
- free_blocks = free_blocks->next;
+ res = &thr->free_blocks->lock;
+ thr->free_blocks = thr->free_blocks->next;
}
- lc_unlock();
return res;
}
-static erts_lc_locked_locks_t *
-create_locked_locks(char *thread_name)
+static lc_thread_t *
+create_thread_data(char *thread_name)
{
- erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
- if (!l_lcks)
+ lc_thread_t *thr = malloc(sizeof(lc_thread_t));
+ if (!thr)
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
- l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
- if (!l_lcks->thread_name)
+ thr->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!thr->thread_name)
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
- l_lcks->emu_thread = 0;
- l_lcks->tid = erts_thr_self();
- l_lcks->required.first = NULL;
- l_lcks->required.last = NULL;
- l_lcks->locked.first = NULL;
- l_lcks->locked.last = NULL;
- l_lcks->prev = NULL;
- lc_lock();
- l_lcks->next = erts_locked_locks;
- if (erts_locked_locks)
- erts_locked_locks->prev = l_lcks;
- erts_locked_locks = l_lcks;
- lc_unlock();
- erts_tsd_set(locks_key, (void *) l_lcks);
- return l_lcks;
+ thr->emu_thread = 0;
+ thr->tid = erts_thr_self();
+ thr->required.first = NULL;
+ thr->required.last = NULL;
+ thr->locked.first = NULL;
+ thr->locked.last = NULL;
+ thr->prev = NULL;
+ thr->free_blocks = NULL;
+ thr->chunks = NULL;
+ sys_memzero(&thr->matrix, sizeof(thr->matrix));
+
+ lc_lock_threads();
+ thr->next = lc_threads;
+ if (lc_threads)
+ lc_threads->prev = thr;
+ lc_threads = thr;
+ lc_unlock_threads();
+ erts_tsd_set(locks_key, (void *) thr);
+ return thr;
}
+static void collect_matrix(lc_matrix_t*);
+
static void
-destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
-{
- ASSERT(l_lcks->thread_name);
- free((void *) l_lcks->thread_name);
- ASSERT(l_lcks->required.first == NULL);
- ASSERT(l_lcks->required.last == NULL);
- ASSERT(l_lcks->locked.first == NULL);
- ASSERT(l_lcks->locked.last == NULL);
-
- lc_lock();
- if (l_lcks->prev)
- l_lcks->prev->next = l_lcks->next;
+destroy_thread_data(lc_thread_t *thr)
+{
+ ASSERT(thr->thread_name);
+ free((void *) thr->thread_name);
+ ASSERT(thr->required.first == NULL);
+ ASSERT(thr->required.last == NULL);
+ ASSERT(thr->locked.first == NULL);
+ ASSERT(thr->locked.last == NULL);
+
+ lc_lock_threads();
+ if (thr->prev)
+ thr->prev->next = thr->next;
else {
- ASSERT(erts_locked_locks == l_lcks);
- erts_locked_locks = l_lcks->next;
+ ASSERT(lc_threads == thr);
+ lc_threads = thr->next;
}
+ if (thr->next)
+ thr->next->prev = thr->prev;
- if (l_lcks->next)
- l_lcks->next->prev = l_lcks->prev;
- lc_unlock();
+ collect_matrix(&thr->matrix);
- free((void *) l_lcks);
+ lc_unlock_threads();
+ while (thr->chunks) {
+ lc_alloc_chunk_t* free_me = thr->chunks;
+ thr->chunks = thr->chunks->next;
+ free(free_me);
+ }
+
+ free((void *) thr);
}
-static ERTS_INLINE erts_lc_locked_locks_t *
+static ERTS_INLINE lc_thread_t *
get_my_locked_locks(void)
{
return erts_tsd_get(locks_key);
}
-static ERTS_INLINE erts_lc_locked_locks_t *
+static ERTS_INLINE lc_thread_t *
make_my_locked_locks(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks)
- return l_lcks;
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr)
+ return thr;
else
- return create_locked_locks(NULL);
+ return create_thread_data(NULL);
}
-static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+static ERTS_INLINE lc_locked_lock_t *
+new_locked_lock(lc_thread_t* thr,
+ erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
- l_lck->next = NULL;
- l_lck->prev = NULL;
- l_lck->id = lck->id;
- l_lck->extra = lck->extra;
- l_lck->file = file;
- l_lck->line = line;
- l_lck->flags = lck->flags | op_flags;
- return l_lck;
+ lc_locked_lock_t *ll = lc_alloc(thr);
+ ll->next = NULL;
+ ll->prev = NULL;
+ ll->id = lck->id;
+ ll->extra = lck->extra;
+ ll->file = file;
+ ll->line = line;
+ ll->flags = lck->flags;
+ ll->taken_options = options;
+ return ll;
}
static void
-raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
- char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
+ char *lname = (1 <= id && id < ERTS_LOCK_ORDER_SIZE
? erts_lock_order[id].name
: "unknown");
erts_fprintf(stderr,"%s'%s:",prefix,lname);
@@ -456,16 +422,16 @@ raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
erts_fprintf(stderr,"%T",extra);
- erts_fprintf(stderr,"%s",lock_type(flags));
+ erts_fprintf(stderr,"[%s]",erts_lock_flags_get_type_name(flags));
if (file)
erts_fprintf(stderr,"(%s:%d)",file,line);
- erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+ erts_fprintf(stderr,"'(%s)%s",rw_op_str(flags),suffix);
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+print_lock2(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags, char *suffix)
{
raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
@@ -477,20 +443,20 @@ print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
}
static void
-print_curr_locks(erts_lc_locked_locks_t *l_lcks)
+print_curr_locks(lc_thread_t *thr)
{
- erts_lc_locked_lock_t *l_lck;
- if (!l_lcks || !l_lcks->locked.first)
+ lc_locked_lock_t *ll;
+ if (!thr || !thr->locked.first)
erts_fprintf(stderr,
"Currently no locks are locked by the %s thread.\n",
- l_lcks->thread_name);
+ thr->thread_name);
else {
erts_fprintf(stderr,
"Currently these locks are locked by the %s thread:\n",
- l_lcks->thread_name);
- for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
- raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags,
- l_lck->file, l_lck->line, "\n");
+ thr->thread_name);
+ for (ll = thr->locked.first; ll; ll = ll->next)
+ raw_print_lock(" ", ll->id, ll->extra, ll->flags,
+ ll->file, ll->line, "\n");
}
}
@@ -519,55 +485,55 @@ uninitialized_lock(void)
}
static void
-lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+lock_twice(char *prefix, lc_thread_t *thr, erts_lc_lock_t *lck,
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+unlock_op_mismatch(lc_thread_t *thr, erts_lc_lock_t *lck,
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unlock_of_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+lock_order_violation(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Lock order violation occured when locking ", lck, "!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
print_lock_order();
lc_abort();
}
static void
-type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
+type_order_violation(char *op, lc_thread_t *thr,
erts_lc_lock_t *lck)
{
erts_fprintf(stderr, "Lock type order violation occured when ");
print_lock(op, lck, "!\n");
- ASSERT(l_lcks);
- print_curr_locks(l_lcks);
+ ASSERT(thr);
+ print_curr_locks(thr);
lc_abort();
}
static void
-lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
+lock_mismatch(lc_thread_t *thr, int exact,
int failed_have, erts_lc_lock_t *have, int have_len,
int failed_have_not, erts_lc_lock_t *have_not, int have_not_len)
{
@@ -614,39 +580,39 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
print_lock2(" ", have_not[i].id, have_not[i].extra, 0, "\n");
}
}
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unlock_of_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unrequire_of_not_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unrequire on ", lck, " lock not required!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+require_twice(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+required_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
@@ -654,15 +620,15 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
static void
thread_exit_handler(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks) {
- if (l_lcks->locked.first) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr) {
+ if (thr->locked.first) {
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
- destroy_locked_locks(l_lcks);
+ destroy_thread_data(thr);
/* erts_tsd_set(locks_key, NULL); */
}
}
@@ -680,24 +646,24 @@ lc_abort(void)
void
erts_lc_set_thread_name(char *thread_name)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (!l_lcks)
- l_lcks = create_locked_locks(thread_name);
+ lc_thread_t *thr = get_my_locked_locks();
+ if (!thr)
+ thr = create_thread_data(thread_name);
else {
- ASSERT(l_lcks->thread_name);
- free((void *) l_lcks->thread_name);
- l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
- if (!l_lcks->thread_name)
+ ASSERT(thr->thread_name);
+ free((void *) thr->thread_name);
+ thr->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!thr->thread_name)
ERTS_INTERNAL_ERROR("strdup failed");
}
- l_lcks->emu_thread = 1;
+ thr->emu_thread = 1;
}
int
erts_lc_is_emu_thr(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- return l_lcks->emu_thread;
+ lc_thread_t *thr = get_my_locked_locks();
+ return thr->emu_thread;
}
int
@@ -732,7 +698,7 @@ erts_lc_get_lock_order_id(char *name)
erts_fprintf(stderr, "Missing lock name\n");
else {
for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++)
- if (strcmp(erts_lock_order[i].name, name) == 0)
+ if (sys_strcmp(erts_lock_order[i].name, name) == 0)
return i;
erts_fprintf(stderr,
"Lock name '%s' missing in lock order "
@@ -743,115 +709,159 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+{
+ if(locked_lock->id < comparand->id) {
+ return -1;
+ } else if(locked_lock->id > comparand->id) {
+ return 1;
+ }
-static int
-find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+ return 0;
+}
+
+static int compare_locked_by_id_extra(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
+ int order = compare_locked_by_id(locked_lock, comparand);
+
+ if(order) {
+ return order;
+ } else if(locked_lock->extra < comparand->extra) {
+ return -1;
+ } else if(locked_lock->extra > comparand->extra) {
+ return 1;
+ }
- if (l_lck) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & lck->flags) == lck->flags)
- return 1;
- return 0;
- }
- else if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra < lck->extra)) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id > lck->id
- || (l_lck->id == lck->id
- && l_lck->extra >= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra <= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
+ return 0;
+}
+
+typedef int (*locked_compare_func)(lc_locked_lock_t *, erts_lc_lock_t *);
+
+/* Searches through a list of taken locks, bailing when it hits an entry whose
+ * order relative to the search template is the opposite of the one at the
+ * start of the search. (*closest_neighbor) is either set to the exact match,
+ * or the one closest to it in the sort order. */
+static int search_locked_list(locked_compare_func compare,
+ lc_locked_lock_t *locked_locks,
+ erts_lc_lock_t *search_template,
+ lc_locked_lock_t **closest_neighbor)
+{
+ lc_locked_lock_t *iterator = locked_locks;
+
+ (*closest_neighbor) = iterator;
+
+ if(iterator) {
+ int relative_order = compare(iterator, search_template);
+
+ if(relative_order < 0) {
+ while((iterator = iterator->next) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order >= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ } else if(relative_order > 0) {
+ while((iterator = iterator->prev) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order <= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ }
+
+ return relative_order == 0;
}
+
return 0;
}
+/* Searches for a lock in the given list that matches search_template, and sets
+ * (*locked_locks) to the closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
-{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
-
- if (l_lck) {
- if (l_lck->id == id)
- return 1;
- else if (l_lck->id < id) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id >= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id <= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
+find_lock(lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+{
+ lc_locked_lock_t *closest_neighbor;
+ int found_lock;
+
+ found_lock = search_locked_list(compare_locked_by_id_extra,
+ (*locked_locks),
+ search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ if(found_lock) {
+ erts_lock_options_t relevant_options;
+ erts_lock_flags_t relevant_flags;
+
+ /* We only care about the options and flags that are set in the
+ * template. */
+ relevant_options = (closest_neighbor->taken_options & search_template->taken_options);
+ relevant_flags = (closest_neighbor->flags & search_template->flags);
+
+ return search_template->taken_options == relevant_options &&
+ search_template->flags == relevant_flags;
}
+
return 0;
}
+/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
+ * closest lock in the sort order. */
+static int
+find_id(lc_locked_lock_t **locked_locks, Sint16 id)
+{
+ lc_locked_lock_t *closest_neighbor;
+ erts_lc_lock_t search_template;
+ int found_lock;
+
+ search_template.id = id;
+
+ found_lock = search_locked_list(compare_locked_by_id,
+ (*locked_locks),
+ &search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ return found_lock;
+}
+
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ lc_thread_t *thr = get_my_locked_locks();
int i;
- if (!l_lcks) {
+ if (!thr) {
for (i = 0; i < len; i++)
resv[i] = 0;
}
else {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < len; i++)
- resv[i] = find_lock(&l_lck, &locks[i]);
+ resv[i] = find_lock(&ll, &locks[i]);
}
}
void
erts_lc_have_lock_ids(int *resv, int *ids, int len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ lc_thread_t *thr = get_my_locked_locks();
int i;
- if (!l_lcks) {
+ if (!thr) {
for (i = 0; i < len; i++)
resv[i] = 0;
}
else {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < len; i++)
- resv[i] = find_id(&l_lck, ids[i]);
+ resv[i] = find_id(&ll, ids[i]);
}
}
@@ -860,27 +870,27 @@ erts_lc_check(erts_lc_lock_t *have, int have_len,
erts_lc_lock_t *have_not, int have_not_len)
{
int i;
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr = get_my_locked_locks();
+ lc_locked_lock_t *ll;
if (have && have_len > 0) {
- if (!l_lcks)
+ if (!thr)
lock_mismatch(NULL, 0,
-1, have, have_len,
-1, have_not, have_not_len);
- l_lck = l_lcks->locked.first;
+ ll = thr->locked.first;
for (i = 0; i < have_len; i++) {
- if (!find_lock(&l_lck, &have[i]))
- lock_mismatch(l_lcks, 0,
+ if (!find_lock(&ll, &have[i]))
+ lock_mismatch(thr, 0,
i, have, have_len,
-1, have_not, have_not_len);
}
}
- if (have_not && have_not_len > 0 && l_lcks) {
- l_lck = l_lcks->locked.first;
+ if (have_not && have_not_len > 0 && thr) {
+ ll = thr->locked.first;
for (i = 0; i < have_not_len; i++) {
- if (find_lock(&l_lck, &have_not[i]))
- lock_mismatch(l_lcks, 0,
+ if (find_lock(&ll, &have_not[i]))
+ lock_mismatch(thr, 0,
-1, have, have_len,
i, have_not, have_not_len);
}
@@ -890,8 +900,8 @@ erts_lc_check(erts_lc_lock_t *have, int have_len,
void
erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (!l_lcks) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (!thr) {
if (have && have_len > 0)
lock_mismatch(NULL, 1,
-1, have, have_len,
@@ -899,35 +909,35 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
else {
int i;
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < have_len; i++) {
- if (!find_lock(&l_lck, &have[i]))
- lock_mismatch(l_lcks, 1,
+ if (!find_lock(&ll, &have[i]))
+ lock_mismatch(thr, 1,
i, have, have_len,
-1, NULL, 0);
}
- for (i = 0, l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
+ for (i = 0, ll = thr->locked.first; ll; ll = ll->next)
i++;
if (i != have_len)
- lock_mismatch(l_lcks, 1,
+ lock_mismatch(thr, 1,
-1, have, have_len,
-1, NULL, 0);
}
}
void
-erts_lc_check_no_locked_of_type(Uint16 flags)
+erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks) {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if (l_lck->flags & flags) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr) {
+ lc_locked_lock_t *ll = thr->locked.first;
+ for (ll = thr->locked.first; ll; ll = ll->next) {
+ if ((ll->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- lock_type(l_lck->flags));
- print_curr_locks(l_lcks);
+ erts_lock_flags_get_type_name(ll->flags));
+ print_curr_locks(thr);
lc_abort();
}
}
@@ -935,7 +945,7 @@ erts_lc_check_no_locked_of_type(Uint16 flags)
}
int
-erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
return 0;
@@ -945,7 +955,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
* This in order to make sure that caller can handle
* the situation without causing a lock order violation.
*/
- erts_lc_locked_locks_t *l_lcks;
+ lc_thread_t *thr;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -953,25 +963,25 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (lck->id < 0)
return 0;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (!l_lcks || !l_lcks->locked.first) {
- ASSERT(!l_lcks || !l_lcks->locked.last);
+ if (!thr || !thr->locked.first) {
+ ASSERT(!thr || !thr->locked.last);
return 0;
}
else {
- erts_lc_locked_lock_t *tl_lck;
+ lc_locked_lock_t *tl_lck;
- ASSERT(l_lcks->locked.last);
+ ASSERT(thr->locked.last);
#if 0 /* Ok when trylocking I guess... */
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("trylocking ", l_lcks, lck);
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags))
+ type_order_violation("trylocking ", thr, lck);
#endif
- if (l_lcks->locked.last->id < lck->id
- || (l_lcks->locked.last->id == lck->id
- && l_lcks->locked.last->extra < lck->extra))
+ if (thr->locked.last->id < lck->id
+ || (thr->locked.last->id == lck->id
+ && thr->locked.last->extra < lck->extra))
return 0;
/*
@@ -980,11 +990,11 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
/* Check that we are not trying to lock this lock twice */
- for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", thr, lck, options);
break;
}
}
@@ -1006,11 +1016,11 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1018,128 +1028,128 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
if (lck->id < 0)
return;
- l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
+ thr = make_my_locked_locks();
+ ll = locked ? new_locked_lock(thr, lck, options, file, line) : NULL;
- if (!l_lcks->locked.last) {
- ASSERT(!l_lcks->locked.first);
+ if (!thr->locked.last) {
+ ASSERT(!thr->locked.first);
if (locked)
- l_lcks->locked.first = l_lcks->locked.last = l_lck;
+ thr->locked.first = thr->locked.last = ll;
}
else {
- erts_lc_locked_lock_t *tl_lck;
+ lc_locked_lock_t *tl_lck;
#if 0 /* Ok when trylocking I guess... */
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("trylocking ", l_lcks, lck);
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags))
+ type_order_violation("trylocking ", thr, lck);
#endif
- for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", thr, lck, options);
if (locked) {
- l_lck->next = tl_lck->next;
- l_lck->prev = tl_lck;
+ ll->next = tl_lck->next;
+ ll->prev = tl_lck;
if (tl_lck->next)
- tl_lck->next->prev = l_lck;
+ tl_lck->next->prev = ll;
else
- l_lcks->locked.last = l_lck;
- tl_lck->next = l_lck;
+ thr->locked.last = ll;
+ tl_lck->next = ll;
}
return;
}
}
if (locked) {
- l_lck->next = l_lcks->locked.first;
- l_lcks->locked.first->prev = l_lck;
- l_lcks->locked.first = l_lck;
+ ll->next = thr->locked.first;
+ thr->locked.first->prev = ll;
+ thr->locked.first = ll;
}
}
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags, file, line);
- if (!l_lcks->required.last) {
- ASSERT(!l_lcks->required.first);
- l_lck->next = l_lck->prev = NULL;
- l_lcks->required.first = l_lcks->required.last = l_lck;
+ lc_thread_t *thr = make_my_locked_locks();
+ lc_locked_lock_t *ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ required_not_locked(thr, lck);
+ ll = new_locked_lock(thr, lck, options, file, line);
+ if (!thr->required.last) {
+ ASSERT(!thr->required.first);
+ ll->next = ll->prev = NULL;
+ thr->required.first = thr->required.last = ll;
}
else {
- erts_lc_locked_lock_t *l_lck2;
- ASSERT(l_lcks->required.first);
- for (l_lck2 = l_lcks->required.last;
+ lc_locked_lock_t *l_lck2;
+ ASSERT(thr->required.first);
+ for (l_lck2 = thr->required.last;
l_lck2;
l_lck2 = l_lck2->prev) {
if (l_lck2->id < lck->id
|| (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
break;
else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
- require_twice(l_lcks, lck);
+ require_twice(thr, lck);
}
if (!l_lck2) {
- l_lck->next = l_lcks->required.first;
- l_lck->prev = NULL;
- l_lcks->required.first->prev = l_lck;
- l_lcks->required.first = l_lck;
+ ll->next = thr->required.first;
+ ll->prev = NULL;
+ thr->required.first->prev = ll;
+ thr->required.first = ll;
}
else {
- l_lck->next = l_lck2->next;
- if (l_lck->next) {
- ASSERT(l_lcks->required.last != l_lck2);
- l_lck->next->prev = l_lck;
+ ll->next = l_lck2->next;
+ if (ll->next) {
+ ASSERT(thr->required.last != l_lck2);
+ ll->next->prev = ll;
}
else {
- ASSERT(l_lcks->required.last == l_lck2);
- l_lcks->required.last = l_lck;
+ ASSERT(thr->required.last == l_lck2);
+ thr->required.last = ll;
}
- l_lck->prev = l_lck2;
- l_lck2->next = l_lck;
+ ll->prev = l_lck2;
+ l_lck2->next = ll;
}
}
}
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- required_not_locked(l_lcks, lck);
- l_lck = l_lcks->required.first;
- if (!find_lock(&l_lck, lck))
- unrequire_of_not_required_lock(l_lcks, lck);
- if (l_lck->prev) {
- ASSERT(l_lcks->required.first != l_lck);
- l_lck->prev->next = l_lck->next;
+ lc_thread_t *thr = make_my_locked_locks();
+ lc_locked_lock_t *ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ required_not_locked(thr, lck);
+ ll = thr->required.first;
+ if (!find_lock(&ll, lck))
+ unrequire_of_not_required_lock(thr, lck);
+ if (ll->prev) {
+ ASSERT(thr->required.first != ll);
+ ll->prev->next = ll->next;
}
else {
- ASSERT(l_lcks->required.first == l_lck);
- l_lcks->required.first = l_lck->next;
+ ASSERT(thr->required.first == ll);
+ thr->required.first = ll->next;
}
- if (l_lck->next) {
- ASSERT(l_lcks->required.last != l_lck);
- l_lck->next->prev = l_lck->prev;
+ if (ll->next) {
+ ASSERT(thr->required.last != ll);
+ ll->next->prev = ll->prev;
}
else {
- ASSERT(l_lcks->required.last == l_lck);
- l_lcks->required.last = l_lck->prev;
+ ASSERT(thr->required.last == ll);
+ thr->required.last = ll->prev;
}
- lc_free((void *) l_lck);
+ lc_free(thr, ll);
}
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *new_ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1147,32 +1157,45 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
if (lck->id < 0)
return;
- l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ thr = make_my_locked_locks();
+ new_ll = new_locked_lock(thr, lck, options, file, line);
- if (!l_lcks->locked.last) {
- ASSERT(!l_lcks->locked.first);
- l_lcks->locked.last = l_lcks->locked.first = l_lck;
+ if (!thr->locked.last) {
+ ASSERT(!thr->locked.first);
+ thr->locked.last = thr->locked.first = new_ll;
+ ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
+ thr->matrix.m[lck->id][0] = 1;
}
- else if (l_lcks->locked.last->id < lck->id
- || (l_lcks->locked.last->id == lck->id
- && l_lcks->locked.last->extra < lck->extra)) {
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("locking ", l_lcks, lck);
- l_lck->prev = l_lcks->locked.last;
- l_lcks->locked.last->next = l_lck;
- l_lcks->locked.last = l_lck;
+ else if (thr->locked.last->id < lck->id
+ || (thr->locked.last->id == lck->id
+ && thr->locked.last->extra < lck->extra)) {
+ lc_locked_lock_t* ll;
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags)) {
+ type_order_violation("locking ", thr, lck);
+ }
+
+ ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
+ ll = thr->locked.last;
+ thr->matrix.m[lck->id][ll->id] |= 1;
+ for (ll = ll->prev; ll; ll = ll->prev) {
+ ASSERT(0 < ll->id && ll->id < ERTS_LOCK_ORDER_SIZE);
+ thr->matrix.m[lck->id][ll->id] |= 2;
+ }
+
+ new_ll->prev = thr->locked.last;
+ thr->locked.last->next = new_ll;
+ thr->locked.last = new_ll;
}
- else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, op_flags);
+ else if (thr->locked.last->id == lck->id && thr->locked.last->extra == lck->extra)
+ lock_twice("Locking", thr, lck, options);
else
- lock_order_violation(l_lcks, lck);
+ lock_order_violation(thr, lck);
}
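/*
 * A sketch of the dependency-matrix encoding built above (inferred from
 * erts_lc_lock_flg_x here and erts_lc_dump_graph below): m[i][j] gathers
 * two bits describing how a lock of order id i was taken while a lock of
 * order id j was held; column 0 represents "no lock held".
 *
 *   m[i][j] & 1  -- j was the most recently taken lock (a direct edge)
 *   m[i][j] & 2  -- j was held further down the lock stack (indirect)
 *
 * erts_lc_dump_graph later reports j under LockedDirectlyBeforeThis when
 * bit 1 is set, and under LockedIndirectlyBeforeThis only when the value
 * is exactly 2, i.e. indirect and never direct.
 */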
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1180,38 +1203,38 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (lck->id < 0)
return;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (l_lcks) {
- l_lck = l_lcks->required.first;
- if (find_lock(&l_lck, lck))
- unlock_of_required_lock(l_lcks, lck);
+ if (thr) {
+ ll = thr->required.first;
+ if (find_lock(&ll, lck))
+ unlock_of_required_lock(thr, lck);
}
- for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
- unlock_op_mismatch(l_lcks, lck, op_flags);
- if (l_lck->prev)
- l_lck->prev->next = l_lck->next;
+ for (ll = thr ? thr->locked.last : NULL; ll; ll = ll->prev) {
+ if (ll->id == lck->id && ll->extra == lck->extra) {
+ if ((ll->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(thr, lck, options);
+ if (ll->prev)
+ ll->prev->next = ll->next;
else
- l_lcks->locked.first = l_lck->next;
- if (l_lck->next)
- l_lck->next->prev = l_lck->prev;
+ thr->locked.first = ll->next;
+ if (ll->next)
+ ll->next->prev = ll->prev;
else
- l_lcks->locked.last = l_lck->prev;
- lc_free((void *) l_lck);
+ thr->locked.last = ll->prev;
+ lc_free(thr, ll);
return;
}
}
- unlock_of_not_locked(l_lcks, lck);
+ unlock_of_not_locked(thr, lck);
}
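/* Note that the unlock path above matches (taken_options & ERTS_LOCK_OPTIONS_RDWR)
 * against the options given here, so a lock taken with the read option must
 * also be released with it; any mismatch is reported via unlock_op_mismatch. */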
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1219,17 +1242,17 @@ void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (lck->id < 0)
return;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (l_lcks) {
- l_lck = l_lcks->required.first;
- if (find_lock(&l_lck, lck))
- unlock_of_required_lock(l_lcks, lck);
+ if (thr) {
+ ll = thr->required.first;
+ if (find_lock(&ll, lck))
+ unlock_of_required_lock(thr, lck);
}
- l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- unlock_of_not_locked(l_lcks, lck);
+ ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ unlock_of_not_locked(thr, lck);
}
int
@@ -1272,23 +1295,25 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
}
void
-erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
void
-erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
@@ -1302,31 +1327,13 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
lck->id = -1;
lck->extra = THE_NON_VALUE;
lck->flags = 0;
+ lck->taken_options = 0;
}
void
erts_lc_init(void)
{
-#ifdef ERTS_LC_STATIC_ALLOC
- int i;
- static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
- for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
-#ifdef DEBUG
- memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
-#endif
- fbs[i].next = &fbs[i+1];
- }
-#ifdef DEBUG
- memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
- 0xdf, sizeof(erts_lc_free_block_t));
-#endif
- fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
- free_blocks = &fbs[0];
-#else /* #ifdef ERTS_LC_STATIC_ALLOC */
- free_blocks = NULL;
-#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-
- if (ethr_spinlock_init(&free_blocks_lock) != 0)
+ if (ethr_spinlock_init(&lc_threads_lock) != 0)
ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
@@ -1348,5 +1355,76 @@ erts_lc_pll(void)
print_curr_locks(get_my_locked_locks());
}
+static void collect_matrix(lc_matrix_t* matrix)
+{
+ int i, j;
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ for (j = 0; j <= i; j++) {
+ tot_lc_matrix.m[i][j] |= matrix->m[i][j];
+ }
+#ifdef DEBUG
+ for ( ; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ ASSERT(matrix->m[i][j] == 0);
+ }
+#endif
+ }
+}
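/* collect_matrix ORs a single thread's dependency bits into the global
 * tot_lc_matrix. Only the lower triangle (j <= i) can ever be set, since a
 * lock may only be taken while holding locks of lower (or equal) order id;
 * the DEBUG loop verifies that the upper triangle stays empty. */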
+
+Eterm
+erts_lc_dump_graph(void)
+{
+ const char* basename = "lc_graph.";
+ char filename[40];
+ lc_matrix_t* tot = &tot_lc_matrix;
+ lc_thread_t* thr;
+ int i, j, name_max = 0;
+ FILE* ff;
+
+ lc_lock_threads();
+ for (thr = lc_threads; thr; thr = thr->next) {
+ collect_matrix(&thr->matrix);
+ }
+ lc_unlock_threads();
+
+ sys_strcpy(filename, basename);
+ sys_get_pid(filename + strlen(basename),
+ sizeof(filename) - strlen(basename));
+ ff = fopen(filename, "w");
+ if (!ff)
+ return am_error;
+
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ int len = strlen(erts_lock_order[i].name);
+ if (name_max < len)
+ name_max = len;
+ }
+ fputs("%This file was generated by erts_debug:lc_graph()\n\n", ff);
+ fputs("%{ThisLockName, ThisLockId, LockedDirectlyBeforeThis, LockedIndirectlyBeforeThis}\n", ff);
+ fprintf(ff, "[{%*s, %2d}", name_max, "\"NO LOCK\"", 0);
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ char* delim = "";
+ fprintf(ff, ",\n {%*s, %2d, [", name_max, erts_lock_order[i].name, i);
+ for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ if (tot->m[i][j] & 1) {
+ fprintf(ff, "%s%d", delim, j);
+ delim = ",";
+ }
+ }
+ fprintf(ff, "], [");
+ delim = "";
+ for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ if (tot->m[i][j] == 2) {
+ fprintf(ff, "%s%d", delim, j);
+ delim = ",";
+ }
+ }
+ fputs("]}", ff);
+ }
+ fputs("].", ff);
+ fclose(ff);
+ erts_fprintf(stderr, "Created file '%s' in current working directory\n",
+ filename);
+ return am_ok;
+}
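/* A purely illustrative excerpt of a generated "lc_graph.<pid>" file; the
 * actual lock names and order ids come from erts_lock_order and vary by
 * build:
 *
 *   [{"NO LOCK",  0},
 *    { proc_msgq, 41, [40,0], [39]},
 *    ...].
 *
 * The file is a single Erlang term terminated by a period, so it can be
 * read back with file:consult/1 for offline analysis. */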
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 18296d1fec..d10e32985a 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,6 +36,8 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#include "erl_lock_flags.h"
+
#ifndef ERTS_ENABLE_LOCK_POSITION
/* Enable in order for _x variants of mtx functions to be used. */
#define ERTS_ENABLE_LOCK_POSITION 1
@@ -44,36 +46,14 @@
typedef struct {
int inited;
Sint16 id;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
UWord extra;
} erts_lc_lock_t;
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-
-#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
-
-#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
-#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
-
-#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
- | ERTS_LC_FLG_LT_RWSPINLOCK \
- | ERTS_LC_FLG_LT_MUTEX \
- | ERTS_LC_FLG_LT_RWMUTEX \
- | ERTS_LC_FLG_LT_PROCLOCK)
-
-#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
@@ -83,46 +63,42 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
-void erts_lc_check_no_locked_of_type(Uint16 flags);
-int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_check_no_locked_of_type(erts_lock_flags_t flags);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
char* file, unsigned int line);
void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
-void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
-void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra);
void erts_lc_destroy_lock(erts_lc_lock_t *lck);
void erts_lc_fail(char *fmt, ...);
int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
+Eterm erts_lc_dump_graph(void);
+
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
-#ifdef ERTS_SMP
-#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
-#else
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
-#endif
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index a00e0f0fff..1ae6076b12 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,51 +18,37 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- */
-
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
#include "sys.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "global.h"
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
-
-/* globals, dont access these without locks or blocks */
+#include "erl_thr_progress.h"
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+#include "erl_node_tables.h"
+#include "erl_alloc_util.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_db.h"
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
+#define LCNT_MAX_CARRIER_ENTRIES 255
-/* local functions */
+/* - Locals that are shared with the header implementation - */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+#ifdef DEBUG
+int lcnt_initialization_completed__;
+#endif
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+erts_lock_flags_t lcnt_category_mask__;
+ethr_tsd_key lcnt_thr_data_key__;
-const int log2_tab64[64] = {
+const int lcnt_log2_tab64__[64] = {
63, 0, 58, 1, 59, 47, 53, 2,
60, 39, 48, 27, 54, 33, 42, 3,
61, 51, 37, 40, 49, 18, 28, 20,
@@ -72,629 +58,602 @@ const int log2_tab64[64] = {
56, 45, 25, 31, 35, 16, 9, 12,
44, 24, 15, 8, 23, 7, 6, 5};
-static ERTS_INLINE int lcnt_log2(Uint64 v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
-}
-
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+/* - Local variables - */
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
- sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
-}
+typedef struct lcnt_static_lock_ref_ {
+ erts_lcnt_ref_t *reference;
-static void lcnt_time(erts_lcnt_time_t *time) {
- /*
- * erts_sys_hrtime() is the highest resolution
- * we could find, it may or may not be monotonic...
- */
- ErtsMonotonicTime mtime = erts_sys_hrtime();
- time->s = (unsigned long) (mtime / 1000000000LL);
- time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
-}
+ erts_lock_flags_t flags;
+ const char *name;
+ Eterm id;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
+ struct lcnt_static_lock_ref_ *next;
+} lcnt_static_lock_ref_t;
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
+static ethr_atomic_t lcnt_static_lock_registry;
- /* the difference should not be able to get bigger than 1 sec in ns*/
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
- }
+static erts_lcnt_time_t lcnt_timer_start;
- ASSERT(ds >= 0);
+static int lcnt_preserve_info;
- d->s = ds;
- d->ns = dns;
-}
+/* local functions */
+
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
+
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
-/* difference d must be non-negative */
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- t->s += d->s;
- t->ns += d->ns;
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
- t->s += t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
+ stats->times_waited = 0;
+
+ stats->file = NULL;
+ stats->line = 0;
+
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
+ }
+
+ info->location_count = 1;
}
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
- if (!eltd) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
}
+
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
-}
-
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
}
-/* debug */
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * with its neighbors rather than a global lock. Deletion is rather quick, but
+ * insertion is still serial since the head becomes a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
-#if 0
-static char* lock_opt(Uint16 flag) {
- if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
- if (flag & ERTS_LCNT_LO_READ ) return "r ";
- if (flag & ERTS_LCNT_LO_WRITE) return " w";
- return "--";
-}
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
- erts_aint_t w_state, r_state;
- char *type;
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
- if (strcmp(lock->name, "run_queue") != 0) return;
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
- if (lock->flag & flag) {
- erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- type,
- lock->id);
- }
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
}
-#endif
-
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
- }
- }
- return &lock->stats[0];
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}
-static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
- int idx;
- unsigned long r;
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
- idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
- } else {
- r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
- if (r) idx = lcnt_log2(r);
- else idx = 0;
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
- erts_lcnt_time_t *time_wait) {
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- ethr_atomic_inc(&stats->tries);
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ return;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- lcnt_update_stats_hist(&stats->hist,time_wait);
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
}
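/* The retry ladder above always attempts the three locks in the same order
 * (entry, then next, then prev) and fully releases whatever it managed to
 * take before yielding, so threads contending over overlapping neighborhoods
 * back off and retry instead of deadlocking. */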
-/* interface */
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
+}
+
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
+
+ prev = &list->head;
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
+ lcnt_lock_list_entry(prev);
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ next = prev->next;
- /* init tsd */
- lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key,"lcnt_data");
+ lcnt_lock_list_entry(next);
- lcnt_lock();
+ info->next = next;
+ info->prev = prev;
- erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK;
- eltd = lcnt_thread_data_alloc();
- ethr_tsd_set(lcnt_thr_data_key, eltd);
+ prev->next = info;
+ next->prev = info;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
+}
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- if (!erts_lcnt_data) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
+
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
+
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
}
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
- lcnt_unlock();
+ prev = &list->head;
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
+
+ lcnt_lock_list_entry(next);
+
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
+
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
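/* The entries are pre-linked to one another before any list lock is taken,
 * so the whole carrier can be spliced in as one segment while holding only
 * the head sentinel and its successor; the dangling prev/next links that the
 * loop above leaves on the first and last entry are patched during the
 * splice. */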
-void erts_lcnt_late_init() {
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
+
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
+
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-/* list operations */
+/* - Carrier operations - */
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
+}
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+ return erts_thr_progress_unmanaged_continue(handle);
+}
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- if (!list) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-/* only do this on the list with the deleted locks! */
-void erts_lcnt_list_clear(erts_lcnt_lock_list_t *list) {
- erts_lcnt_lock_t *lock = NULL,
- *next = NULL;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
- lock = list->head;
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
- }
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
+ }
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
+
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
} else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
+
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
+
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
}
- lock->next = NULL;
- list->tail = lock;
+}
- list->n++;
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
+
+ if(lcnt_preserve_info) {
+ ethr_atomic_set(&info->ref_count, 1);
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
+
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
+ } else {
+ lcnt_info_deallocate(info);
+ }
}
-/* END ASSUMPTION: lcnt_data_lock taken */
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
-/* lock operations */
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, NIL);
+ info->dispose = &lcnt_info_dispose;
+
+ lcnt_clear_stats(info);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
- int i;
- if (name == NULL) { ERTS_LCNT_CLEAR_FLAG(lock); return; }
- lcnt_lock();
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
+ ASSERT(lcnt_initialization_completed__);
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
- lock->n_stats = 1;
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->entry_count = entry_count;
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
- }
+ ethr_atomic_init(&result->ref_count, entry_count);
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
- lcnt_unlock();
-}
-/* init empty, instead of zero struct */
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = 0;
- lock->name = NULL;
- lock->id = NIL;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
- lock->n_stats = 0;
- sys_memzero(lock->stats, sizeof(lock->stats));
-}
-/* destroy lock */
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- lcnt_lock();
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- erts_lcnt_lock_t *deleted_lock;
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- if (!deleted_lock) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
+
+ lcnt_lock_info_init_helper(info);
+
+ info->carrier = result;
}
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- ERTS_LCNT_CLEAR_FLAG(lock);
- lcnt_unlock();
+ return result;
}
-/* lock */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
+#ifdef DEBUG
+ int i;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ /* Verify that all locks share the same categories/static property; all
+ * other flags are fair game. */
+ for(i = 1; i < carrier->entry_count; i++) {
+ const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
+ ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;
- eltd = lcnt_get_thread_data();
- ASSERT(eltd);
+ erts_lcnt_lock_info_t *previous, *current;
- w_state = ethr_atomic_read(&lock->w_state);
+ previous = &carrier->entries[i - 1];
+ current = &carrier->entries[i];
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
+ ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
}
+#endif
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
+ if(swapped_carrier != (ethr_sint_t)NULL) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
+#endif
+
+ lcnt_deallocate_carrier__(carrier);
} else {
- eltd->lock_in_conflict = 0;
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
}
}
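/* Installation is "first writer wins": the cmpxchg only succeeds while *ref
 * is still NULL, so when two threads race to install a carrier for the same
 * lock, the loser frees its own carrier and the winner's entries remain
 * published in the current-locks list. */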
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc(&lock->w_state);
- eltd = lcnt_get_thread_data();
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
+ }
+}
- ASSERT(eltd);
+/* - Static lock registry -
+ *
+ * Since static locks can be trusted to never disappear, we can track them
+ * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
+ * variant. */
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
- }
+static void lcnt_init_static_lock_registry(void) {
+ ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
}
-/* if a lock wasn't really a lock operation, bad bad process locks */
+static void lcnt_update_static_locks(void) {
+ lcnt_static_lock_ref_t *iterator =
+ (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ while(iterator != NULL) {
+ if(!erts_lcnt_check_enabled(iterator->flags)) {
+ erts_lcnt_uninstall(iterator->reference);
+ } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
+ erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);
- ethr_atomic_dec(&lock->w_state);
-}
+ erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);
-/*
- * erts_lcnt_lock_post
- *
- * Used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
+ erts_lcnt_install(iterator->reference, carrier);
+ }
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
+ iterator = iterator->next;
+ }
}
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
-#ifdef DEBUG
- erts_aint_t flowstate;
-#endif
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags) {
+ lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
+ int retry_insertion;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);
-#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc(&lock->flowstate);
- }
-#endif
+ lock->reference = reference;
+ lock->flags = flags;
+ lock->name = name;
+ lock->id = id;
- eltd = lcnt_get_thread_data();
+ do {
+ ethr_sint_t swapped_head;
- ASSERT(eltd);
+ lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
- /* if lock was in conflict, time it */
- stats = lcnt_get_lock_stats(lock, file, line);
- if (eltd->timer_set) {
- lcnt_time(&timer);
+ swapped_head = ethr_atomic_cmpxchg_acqb(
+ &lcnt_static_lock_registry,
+ (ethr_sint_t)lock,
+ (ethr_sint_t)lock->next);
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+ retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
+ } while(retry_insertion);
+}
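/* The loop above is a classic lock-free stack push (a Treiber stack): point
 * the new node at the observed head and publish it with a compare-and-swap,
 * reloading the head and retrying whenever another thread got there first.
 * Since registry entries are never removed, there is no ABA hazard here. */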
+
+/* - Initialization - */
+
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
+
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
+ lcnt_init_static_lock_registry();
}
-/* unlock */
+void erts_lcnt_post_thr_init() {
+ /* ASSUMPTION: this is safe since it runs prior to the creation of other
+     * threads (directly after ethread init). */
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
+
+ erts_lcnt_thread_setup();
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+
#ifdef DEBUG
- {
- erts_aint_t w_state;
- erts_aint_t flowstate;
-
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec(&lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0);
- }
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
#endif
- ethr_atomic_dec(&lock->w_state);
}
-/* trylock */
+void erts_lcnt_post_startup(void) {
+ /* Default to capturing everything to match the behavior of the old lock
+ * counter build. */
+ erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
+}
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
+
+ ASSERT(eltd);
+
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
+}
+
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ if (eltd) {
+ free(eltd);
}
}
+/* - BIF interface - */
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (res != EBUSY) {
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- {
- erts_aint_t flowstate;
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_inc(&lock->w_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+}
+
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
+
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
+
+ count = ethr_atomic_dec_read(&info->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
} else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
+
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
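/* Once the count hits zero the entry is unlinked while all three list locks
 * are held, then handed to its dispose callback: per lcnt_info_dispose above
 * this either parks the entry on the deleted-locks list (when preserve_info
 * is enabled) or releases its carrier right away. */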
-/* thread operations */
+erts_lock_flags_t erts_lcnt_get_category_mask() {
+ return lcnt_category_mask__;
+}
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
+ erts_lock_flags_t changed_categories;
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
+ ASSERT(lcnt_initialization_completed__);
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ changed_categories = (lcnt_category_mask__ ^ mask);
+ lcnt_category_mask__ = mask;
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ if(changed_categories) {
+ lcnt_update_static_locks();
+ }
- if (eltd) {
- free(eltd);
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
+ erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
}
-}
-/* bindings for bifs */
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ }
+
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
+ erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ }
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options |= opt;
- return prev;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
+ erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
+ erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
+ }
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options &= ~opt;
- return prev;
+void erts_lcnt_set_preserve_info(int enable) {
+ lcnt_preserve_info = enable;
+}
+
+int erts_lcnt_get_preserve_info() {
+ return lcnt_preserve_info;
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
+ }
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
}
+}
+
+erts_lcnt_data_t erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+ erts_lcnt_data_t result;
- /* empty deleted locks in lock list */
- erts_lcnt_list_clear(erts_lcnt_data->deleted_locks);
+ lcnt_time__(&timer_stop);
- lcnt_time(&timer_start);
+ result.timer_start = lcnt_timer_start;
- lcnt_unlock();
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
+
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
- erts_lcnt_time_t timer_stop;
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
- lcnt_lock();
+ current = *iterator ? *iterator : &list->head;
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+ ASSERT(current != &list->tail);
- lcnt_unlock();
+ lcnt_lock_list_entry(current);
- return erts_lcnt_data;
-}
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+ return next != &list->tail;
}
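/* A minimal usage sketch of the iteration protocol, mirroring
 * erts_lcnt_clear_counters above: start from a NULL iterator and loop until
 * the function returns 0. The callee retains the next entry and releases the
 * previous one, so the current entry stays valid across the loop body. */
static void lcnt_iterate_sketch(erts_lcnt_lock_info_list_t *list) {
    erts_lcnt_lock_info_t *iterator = NULL;

    while (erts_lcnt_iterate_list(list, &iterator)) {
        /* inspect *iterator here; it is pinned until the next call */
    }
}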
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 3e8dcefe69..89d95a73cf 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,64 +18,51 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- * Abstract:
- * Locks statistics internal representation.
- *
- * Conceptual representation,
- * - set name
- * | - id (the unique lock)
- * | | - lock type
- * | | - statistics
- * | | | - location (file and line number)
- * | | | - tries
- * | | | - collisions (including trylock busy)
- * | | | - timer (time spent in waiting for lock)
- * | | | - n_timer (collisions excluding trylock busy)
- * | | | - histogram
- * | | | | - # 0 = log2(lock wait_time ns)
- * | | | | - ...
- * | | | | - # n = log2(lock wait_time ns)
- *
- * Each instance of a lock is the unique lock, i.e. set and id in that set.
- * For each lock there is a set of statistics with where and what impact
- * the lock aqusition had.
- *
- * Runtime options
- * - suspend, used when internal lock-counting can't be applied. For instance
- * when allocating a term for the outside and halloc needs to be used.
- * Default: off.
- * - location, reserved and not used.
- * - proclock, disable proclock counting. Used when performance might be an
- * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
- * Default: off.
- * - copysave, enable saving of destroyed locks (and thereby its statistics).
- * If memory constraints is an issue this need to be disabled.
- * Accessible from erts_debug:lock_counters({copy_save, bool()}).
- * Default: off.
+/**
+ * @description Statistics for locks.
+ * @file erl_lock_count.h
+ *
+ * @author Björn-Egil Dahlberg
+ * @author John Högberg
+ *
+ * Conceptual representation:
*
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - attempts
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent waiting for the lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
+ *
+ * Each lock instance is uniquely identified by its set and by its id within
+ * that set. For each lock there is a set of statistics recording where, and
+ * with what impact, the lock was acquired.
*/
-#include "sys.h"
-
#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
#ifndef ERTS_ENABLE_LOCK_POSITION
-/* Enable in order for _x variants of mtx functions to be used. */
+/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif
+#include "sys.h"
#include "ethread.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+#include "erl_term.h"
+#include "erl_lock_flags.h"
+
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (5)
-/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
@@ -85,153 +72,857 @@
#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
#endif
-#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
-#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
-
-#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
-#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
-
-#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
- | ERTS_LCNT_LO_WRITE )
-
-#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
- | ERTS_LCNT_LT_RWSPINLOCK \
- | ERTS_LCNT_LT_MUTEX \
- | ERTS_LCNT_LT_RWMUTEX \
- | ERTS_LCNT_LT_PROCLOCK )
-
-#define ERTS_LCNT_LOCK_TYPE(lock) ((lock)->flag & ERTS_LCNT_LT_ALL)
-#define ERTS_LCNT_IS_LOCK_INVALID(lock) (!((lock)->flag & ERTS_LCNT_LT_ALL))
-#define ERTS_LCNT_CLEAR_FLAG(lock) ((lock)->flag = 0)
-
-/* runtime options */
-
-#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
-#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
-#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
-#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3)
-#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4)
-
typedef struct {
unsigned long s;
unsigned long ns;
} erts_lcnt_time_t;
-extern erts_lcnt_time_t timer_start;
-
typedef struct {
- Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */
+ /** @brief Histogram of log2(wait time in ns) occurrences. */
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
} erts_lcnt_hist_t;
-typedef struct erts_lcnt_lock_stats_s {
- /* "tries" and "colls" needs to be atomic since
- * trylock busy does not aquire a lock and there
- * is no post action to rectify the situation
- */
+typedef struct {
+ /** @brief In which file the lock was taken. May be NULL. */
+ const char *file;
+ /** @brief Line number in \c file */
+ unsigned int line;
- char *file; /* which file the lock was taken */
- unsigned int line; /* line number in file */
+ /* "attempts" and "collisions" need to be atomic since try_lock busy does
+ * not acquire a lock and there is no post action to rectify the
+ * situation. */
- ethr_atomic_t tries; /* n tries to get lock */
- ethr_atomic_t colls; /* n collisions of tries to get lock */
+ ethr_atomic_t attempts;
+ ethr_atomic_t collisions;
- unsigned long timer_n; /* #times waited for lock */
- erts_lcnt_time_t timer; /* total wait time for lock */
- erts_lcnt_hist_t hist;
+ erts_lcnt_time_t total_time_waited;
+ Uint64 times_waited;
+
+ erts_lcnt_hist_t wait_time_histogram;
} erts_lcnt_lock_stats_t;
-/* rw locks uses both states, other locks only uses w_state */
-typedef struct erts_lcnt_lock_s {
- char *name; /* lock name */
- Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+typedef struct lcnt_lock_info_t_ {
+ erts_lock_flags_t flags;
+ const char *name;
+ /** @brief Id if possible, must be an immediate */
+ Eterm id;
-#ifdef DEBUG
- ethr_atomic_t flowstate;
-#endif
+ /* The first entry is reserved as a fallback for when location information
+ * is missing, and when the lock is used in more than (MAX_LOCK_LOCATIONS
+ * - 1) different places. */
+ erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
+ unsigned int location_count;
- /* lock states */
- ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
- ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+ /* -- Everything below is internal to this module ---------------------- */
- /* statistics */
- unsigned int n_stats;
- erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/
+ /* Lock states; rw locks use both states, other locks only use w_state. */
- /* chains for list handling */
- /* data is hold by lcnt_lock */
- struct erts_lcnt_lock_s *prev;
- struct erts_lcnt_lock_s *next;
-} erts_lcnt_lock_t;
+ /** @brief Write state. 0 = not taken, otherwise n threads waiting */
+ ethr_atomic_t w_state;
+ /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
+ ethr_atomic_t r_state;
-typedef struct {
- erts_lcnt_lock_t *head;
- erts_lcnt_lock_t *tail;
- unsigned long n;
-} erts_lcnt_lock_list_t;
+ struct lcnt_lock_info_t_ *prev;
+ struct lcnt_lock_info_t_ *next;
-typedef struct {
- erts_lcnt_time_t duration; /* time since last clear */
- erts_lcnt_lock_list_t *current_locks;
- erts_lcnt_lock_list_t *deleted_locks;
-} erts_lcnt_data_t;
+ /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
+ ethr_atomic_t ref_count;
+ ethr_atomic32_t lock;
+
+ /** @brief Deletion hook called once \c ref_count reaches 0; may defer
+ * deletion by modifying \c ref_count. */
+ void (*dispose)(struct lcnt_lock_info_t_ *);
+
+ struct lcnt_lock_info_carrier_ *carrier;
+} erts_lcnt_lock_info_t;
+
+typedef struct lcnt_lock_info_list_ {
+ erts_lcnt_lock_info_t head;
+ erts_lcnt_lock_info_t tail;
+} erts_lcnt_lock_info_list_t;
typedef struct {
- int id;
+ erts_lcnt_time_t timer_start; /**< Time of last clear */
+ erts_lcnt_time_t duration; /**< Time since last clear */
+
+ erts_lcnt_lock_info_list_t *current_locks;
+ erts_lcnt_lock_info_list_t *deleted_locks;
+} erts_lcnt_data_t;
- erts_lcnt_time_t timer; /* timer */
- int timer_set; /* bool */
- int lock_in_conflict; /* bool */
-} erts_lcnt_thread_data_t;
+typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;
-/* globals */
+typedef ethr_atomic_t erts_lcnt_ref_t;
-extern Uint16 erts_lcnt_rt_options;
+/* -- Globals -------------------------------------------------------------- */
-/* function declerations */
+/** @brief Checks whether counting is enabled for any of the given
+ * categories. */
+#define erts_lcnt_check_enabled(flags) \
+ (lcnt_category_mask__ & (flags))
-void erts_lcnt_init(void);
+/* -- Lock operations ------------------------------------------------------
+ *
+ * All of these will nop if there's nothing "installed" on the given reference,
+ * in order to transparently support enable/disable at runtime. */
+
+/** @brief Records that a lock is being acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock
+ * @param option Notes whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Records that a lock has been acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock_post
+ * @param file The name of the file where the lock was acquired.
+ * @param line The line at which the lock was acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);
+
+/** @brief Records that a lock has been released. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_unlock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Rectifies the case where a recorded lock operation never
+ * actually acquired the lock.
+ *
+ * Only used for process locks at the moment. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);
+
+/** @brief Records the result of a trylock, placing the queried lock status in
+ * \c result. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);
+
+/** @copydoc erts_lcnt_trylock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option);
+
+/* Indexed variants of the standard lock operations, for use when a single
+ * reference contains many counters (e.g. process locks).
+ *
+ * erts_lcnt_open_ref must be used to safely extract the installed carrier,
+ * which must be released with erts_lcnt_close_ref on success.
+ *
+ * Refer to \c erts_lcnt_lock for example usage. */
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option);
+
+/* -- Reference operations ------------------------------------------------- */
+
+/** @brief Registers a lock counter reference; this must be called prior to
+ * using any other functions in this module. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref);
+
+/** @brief As \c erts_lcnt_init_ref, but also enables lock counting right
+ * away when the lock's category is enabled, to reduce noise.
+ * @param id An immediate Erlang term with whatever extra data you want to
+ * identify this lock with. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags);
+
+/** @brief Checks whether counting is enabled on the given reference. */
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref);
+
+/** @brief Convenience macro to re/enable counting on an already initialized
+ * reference. Don't forget to specify the lock type in \c flags! */
+#define erts_lcnt_install_new_lock_info(ref, name, id, flags) \
+ do { \
+     if(!erts_lcnt_check_ref_installed(ref)) { \
+         erts_lcnt_lock_info_carrier_t *__carrier; \
+         __carrier = erts_lcnt_create_lock_info_carrier(1); \
+         erts_lcnt_init_lock_info_idx(__carrier, 0, name, id, flags); \
+         erts_lcnt_install(ref, __carrier); \
+     } \
+ } while(0)
+
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);
+
+/** @brief Initializes the lock info at the given index.
+ * @param id An immediate Erlang term with whatever extra data you want to
+ * identify this lock with.
+ * @param flags The flags the lock itself was initialized with. Keep in mind
+ * that all locks in a carrier must share the same category/static property. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags);
+
+/** @brief Atomically installs the given lock counters. Nops (and releases the
+ * provided carrier) if something was already installed. */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);
+
+/** @brief Atomically removes the currently installed lock counters. Nops if
+ * nothing was installed. */
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);
+
+/* -- Module initialization ------------------------------------------------ */
+
+void erts_lcnt_pre_thr_init(void);
+void erts_lcnt_post_thr_init(void);
void erts_lcnt_late_init(void);
-/* thread operations */
+/** @brief Called after everything in the system has been initialized, including
+ * the schedulers. This is mainly a backwards compatibility shim for matching
+ * the old lcnt behavior where all lock counting was enabled by default. */
+void erts_lcnt_post_startup(void);
+
void erts_lcnt_thread_setup(void);
void erts_lcnt_thread_exit_handler(void);
-/* list operations (local) */
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
-
-void erts_lcnt_list_clear( erts_lcnt_lock_list_t *list);
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-
-/* lock operations (global) */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock);
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
-
-void erts_lcnt_lock(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
-
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
-
-/* bif interface */
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
-void erts_lcnt_clear_counters(void);
-char *erts_lcnt_lock_type(Uint16 type);
-erts_lcnt_data_t *erts_lcnt_get_data(void);
+/* -- BIF interface -------------------------------------------------------- */
+
+/** @brief Safely iterates through all entries in the given list.
+ *
+ * The referenced item will be valid until the next call to
+ * \c erts_lcnt_iterate_list after which point it may be destroyed; call
+ * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
+ *
+ * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
+ * iterator and breaking out of the loop.
+ *
+ * @param iterator The iteration variable; set the pointee to NULL to start
+ * iteration.
+ * @return 1 while the iterator is valid, 0 at the end of the list. */
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);
+
+/** @brief Clears the counter state of all locks, and releases all locks
+ * preserved through erts_lcnt_set_preserve_info (if any). */
+void erts_lcnt_clear_counters(void);
+
+/** @brief Retrieves the global lock counter state.
+ *
+ * Note that the lists may be modified while you're mucking around with them.
+ * Always use \c erts_lcnt_iterate_list to enumerate them. */
+erts_lcnt_data_t erts_lcnt_get_data(void);
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);
+
+/** @brief Sets whether to preserve the info of destroyed/uninstalled locks.
+ *
+ * This option does not distinguish between a lock being destroyed and lock
+ * counting simply being disabled for it, so erts_lcnt_set_category_mask must
+ * not be used while this option is active. */
+void erts_lcnt_set_preserve_info(int enable);
+
+int erts_lcnt_get_preserve_info(void);
+
+/** @brief Updates the category mask, enabling or disabling counting on the
+ * affected locks as necessary.
+ *
+ * This is not guaranteed to find all existing locks; only those that are
+ * flagged as static locks and those reachable through other means can be
+ * altered. */
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask);
+
+erts_lock_flags_t erts_lcnt_get_category_mask(void);
+
+/* -- Inline implementation ------------------------------------------------ */
+
+/* The following is a hack to get the things we need from erl_thr_progress.h,
+ * which we can't #include without dependency hell breaking loose.
+ *
+ * The size of LcntThrPrgrLaterOp and value of the constant are verified at
+ * compile-time in erts_lcnt_pre_thr_init. */
+
+int lcnt_thr_progress_unmanaged_delay__(void);
+void lcnt_thr_progress_unmanaged_continue__(int handle);
+typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
+#define LCNT_THR_PRGR_DHANDLE_MANAGED -1
+
+struct lcnt_lock_info_carrier_ {
+ ethr_atomic_t ref_count;
+
+ LcntThrPrgrLaterOp release_entries;
+
+ unsigned char entry_count;
+ erts_lcnt_lock_info_t entries[];
+};
+
+typedef struct {
+ erts_lcnt_time_t timer; /* Start of the current wait */
+ int timer_set; /* Nesting count; > 0 while the timer is armed */
+ int lock_in_conflict; /* bool */
+} lcnt_thread_data_t__;
+
+extern const int lcnt_log2_tab64__[];
+
+extern ethr_tsd_key lcnt_thr_data_key__;
+extern erts_lock_flags_t lcnt_category_mask__;
+
+#ifdef DEBUG
+extern int lcnt_initialization_completed__;
+#endif
+
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags);
+
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v);
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state);
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time);
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time) {
+ /* erts_sys_hrtime() is the highest-resolution time source we could
+ * find; it may or may not be monotonic. */
+ ErtsMonotonicTime mtime = erts_sys_hrtime();
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
+}
+
+/* difference d must be non-negative */
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ t->s += d->s;
+ t->ns += d->ns;
+
+ t->s += t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+}
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+ /* The ns difference can never be larger than one second. */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ ASSERT(ds >= 0);
+
+ d->s = ds;
+ d->ns = dns;
+}
+
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
+ int idx;
+
+ if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+
+ idx = r ? lcnt_log2__(r) : 0;
+ }
+
+ hist->ns[idx]++;
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
+ ethr_atomic_inc(&stats->attempts);
+
+ if(lock_in_conflict) {
+ ethr_atomic_inc(&stats->collisions);
+ }
+
+ if(time_waited) {
+ stats->times_waited++;
+
+ lcnt_time_add__(&stats->total_time_waited, time_waited);
+ lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
+ }
+}
+
+/* If we were installed while the lock was held, r/w_state will be 0 and we
+ * can't tell which unlock or unacquire operation was the last. To get around
+ * this we assume that all excess operations go *towards* zero rather than down
+ * to zero, eventually becoming consistent with the actual state once the lock
+ * is fully released.
+ *
+ * Conflicts might not be counted until the recorded state is fully consistent
+ * with the actual state, but there should be no other ill effects. */
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
+ ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
+
+ /* We cannot assume that state is >= -1 here; unlock and unacquire might
+ * bring it below -1 and race to increment it back. */
+
+ if(state < 0) {
+ ethr_atomic_inc_acqb(l_state);
+ }
+}
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
+ unsigned int i;
+
+ ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);
+
+ for(i = 0; i < info->location_count; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
+
+ if(stats->file == file && stats->line == line) {
+ return stats;
+ }
+ }
+
+ if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];
+
+ stats->file = file;
+ stats->line = line;
+
+ info->location_count++;
+
+ return stats;
+ }
+
+ return &info->location_stats[0];
+}
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
+ lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);
+
+ ASSERT(eltd);
+
+ return eltd;
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
+ if(ERTS_LIKELY(!erts_lcnt_check_ref_installed(ref))) {
+ return 0;
+ }
+
+ ASSERT(lcnt_initialization_completed__);
+
+ (*handle) = lcnt_thr_progress_unmanaged_delay__();
+ (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);
+
+ if(*result) {
+ if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_retain_carrier__(*result);
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 1;
+ } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 0;
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
+ if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_release_carrier__(carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref) {
+ ethr_atomic_init(ref, (ethr_sint_t)NULL);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_init_ref(ref);
+
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC) {
+ lcnt_register_static_lock__(ref, name, id, flags);
+ }
+
+ if(erts_lcnt_check_enabled(flags)) {
+ erts_lcnt_install_new_lock_info(ref, name, id, flags);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref) {
+ return (!!*ethr_atomic_addr(ref));
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_x_idx(carrier, 0, file, line);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_unacquire_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_idx(carrier, 0, result);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_opt_idx(carrier, 0, result, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_sint_t w_state, r_state;
+
+ w_state = ethr_atomic_inc_read(&info->w_state) - 1;
+ r_state = ethr_atomic_read(&info->r_state);
+
+ /* We cannot acquire w_lock if either w or r are taken */
+ eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
+ } else {
+ ethr_sint_t w_state = ethr_atomic_read(&info->w_state);
+
+ /* We cannot acquire r_lock if w_lock is taken */
+ eltd->lock_in_conflict = (w_state > 0);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ if(eltd->lock_in_conflict) {
+ /* Only set the timer if nobody else has it. This should only happen
+ * when proc_locks acquires several locks "atomically." All other locks
+ * will block the thread when locked (w_state > 0) */
+ if(eltd->timer_set == 0) {
+ lcnt_time__(&eltd->timer);
+ }
+
+ eltd->timer_set++;
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+ erts_lcnt_lock_stats_t *stats;
+
+ ASSERT(index < carrier->entry_count);
+
+ /* If the lock was in conflict, update the time spent waiting. */
+ stats = lcnt_get_lock_stats__(info, file, line);
+ if(eltd->timer_set) {
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_time_t timer;
+
+ lcnt_time__(&timer);
+
+ lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);
+
+ eltd->timer_set--;
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ lcnt_dec_lock_state__(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ lcnt_dec_lock_state__(&info->r_state);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ lcnt_dec_lock_state__(&info->w_state);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(result != EBUSY) {
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_atomic_inc(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ lcnt_update_stats__(&info->location_stats[0], 0, NULL);
+ } else {
+ ethr_atomic_inc(&info->location_stats[0].attempts);
+ ethr_atomic_inc(&info->location_stats[0].collisions);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(is_immed(id));
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_TYPE);
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_CATEGORY);
+
+ info->flags = flags;
+ info->name = name;
+ info->id = id;
+}
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&carrier->ref_count);
+#endif
+}
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count == 0) {
+ lcnt_deallocate_carrier__(carrier);
+ }
+}
+
+#endif
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
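
The reworked header above reduces instrumentation to a small protocol: register a reference, record the attempt before blocking, record the acquisition (and thereby the wait time) afterwards, and record the release. A minimal sketch of a wrapper driving that protocol, assuming an ethread mutex; the lcnt_demo_mutex_t type and demo_* functions are invented here for illustration and are not part of this diff:

    typedef struct {
        ethr_mutex mtx;
        erts_lcnt_ref_t lcnt;
    } lcnt_demo_mutex_t;

    static void demo_mutex_init(lcnt_demo_mutex_t *lock, const char *name) {
        ethr_mutex_init(&lock->mtx);
        /* Registers the counter reference; counting starts immediately if
         * the GENERIC category is currently enabled. */
        erts_lcnt_init_ref_x(&lock->lcnt, name, NIL,
                             ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    }

    static void demo_mutex_lock(lcnt_demo_mutex_t *lock) {
        erts_lcnt_lock(&lock->lcnt);  /* records the attempt and any conflict */
        ethr_mutex_lock(&lock->mtx);  /* the potentially blocking call */
        erts_lcnt_lock_post_x(&lock->lcnt, __FILE__, __LINE__); /* wait time */
    }

    static void demo_mutex_unlock(lcnt_demo_mutex_t *lock) {
        erts_lcnt_unlock(&lock->lcnt);
        ethr_mutex_unlock(&lock->mtx);
    }

Since every erts_lcnt_* call nops when nothing is installed on the reference, the wrapper costs little while the category is disabled.
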
diff --git a/erts/emulator/beam/erl_lock_flags.c b/erts/emulator/beam/erl_lock_flags.c
new file mode 100644
index 0000000000..e0a0e95c09
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.c
@@ -0,0 +1,59 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_lock_flags.h"
+
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags) {
+ switch(flags & ERTS_LOCK_FLAGS_MASK_TYPE) {
+ case ERTS_LOCK_FLAGS_TYPE_PROCLOCK:
+ return "proclock";
+ case ERTS_LOCK_FLAGS_TYPE_MUTEX:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_mutex";
+ }
+
+ return "mutex";
+ case ERTS_LOCK_FLAGS_TYPE_SPINLOCK:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_spinlock";
+ }
+
+ return "spinlock";
+ default:
+ return "garbage";
+ }
+}
+
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options) {
+ switch(options) {
+ case ERTS_LOCK_OPTIONS_RDWR:
+ return "rw";
+ case ERTS_LOCK_OPTIONS_READ:
+ return "r";
+ case ERTS_LOCK_OPTIONS_WRITE:
+ return "w";
+ default:
+ return "none";
+ }
+}
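
Both helpers above are pure lookups over the flag bits, so they compose directly for diagnostic output. A small sketch; print_lock_kind is hypothetical and added only for illustration:

    #include <stdio.h>
    #include "erl_lock_flags.h"

    /* Prints, e.g., "rw_mutex (w)" for a write-locked rwmutex. */
    static void print_lock_kind(erts_lock_flags_t flags, erts_lock_options_t opts) {
        printf("%s (%s)\n",
               erts_lock_flags_get_type_name(flags),
               erts_lock_options_get_short_desc(opts));
    }

For instance, print_lock_kind(ERTS_LOCK_TYPE_RWMUTEX, ERTS_LOCK_OPTIONS_WRITE) prints "rw_mutex (w)": get_type_name switches on the type nibble and consults the READ_WRITE property, so category bits do not affect the result.
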
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
new file mode 100644
index 0000000000..d711f69456
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -0,0 +1,78 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_LOCK_FLAGS_H__
+#define ERTS_LOCK_FLAGS_H__
+
+#define ERTS_LOCK_OPTIONS_READ (1 << 1)
+#define ERTS_LOCK_OPTIONS_WRITE (1 << 2)
+
+#define ERTS_LOCK_OPTIONS_RDWR (ERTS_LOCK_OPTIONS_READ | ERTS_LOCK_OPTIONS_WRITE)
+
+/* Property/category are bitfields to simplify their use in masks. */
+#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+
+/* Type is a plain number. */
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+
+#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
+#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
+#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+
+/* "Static" guarantees that the lock will never be destroyed once created. */
+#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
+#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
+
+#define ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR (1 << 6)
+#define ERTS_LOCK_FLAGS_CATEGORY_PROCESS (1 << 7)
+#define ERTS_LOCK_FLAGS_CATEGORY_IO (1 << 8)
+#define ERTS_LOCK_FLAGS_CATEGORY_DB (1 << 9)
+#define ERTS_LOCK_FLAGS_CATEGORY_DEBUG (1 << 10)
+#define ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER (1 << 11)
+#define ERTS_LOCK_FLAGS_CATEGORY_GENERIC (1 << 12)
+#define ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION (1 << 13)
+
+#define ERTS_LOCK_TYPE_SPINLOCK \
+ (ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
+#define ERTS_LOCK_TYPE_RWSPINLOCK \
+ (ERTS_LOCK_TYPE_SPINLOCK | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_MUTEX \
+ (ERTS_LOCK_FLAGS_TYPE_MUTEX)
+#define ERTS_LOCK_TYPE_RWMUTEX \
+ (ERTS_LOCK_TYPE_MUTEX | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_PROCLOCK \
+ (ERTS_LOCK_FLAGS_CATEGORY_PROCESS | \
+ ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
+
+/* -- -- */
+
+typedef unsigned short erts_lock_flags_t;
+typedef unsigned short erts_lock_options_t;
+
+/** @brief Gets the type name of the lock, honoring the RW flag if supplied. */
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags);
+
+/** @brief Gets a short-form description (rw/r/w) of the given lock options. */
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options);
+
+#endif /* ERTS_LOCK_FLAGS_H__ */
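
Because the type is a plain number in the low nibble while properties and categories are independent bits, a composed flag word decomposes with plain masking. A sketch using only the macros defined above:

    /* Decomposing a composed flag word; the commented values follow
     * directly from the definitions above. */
    erts_lock_flags_t flags = ERTS_LOCK_TYPE_RWSPINLOCK
                            | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER;

    int type  = flags & ERTS_LOCK_FLAGS_MASK_TYPE;      /* ERTS_LOCK_FLAGS_TYPE_SPINLOCK */
    int is_rw = !!(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE); /* 1 */
    int cats  = flags & ERTS_LOCK_FLAGS_MASK_CATEGORY;  /* ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER */

This layout is also what lets erts_lcnt_check_enabled in erl_lock_count.h test a whole flag word against the category mask with a single AND.
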
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index b48017d338..8f96dc3d23 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,7 +43,9 @@
*
* DONE:
* - erlang:is_map/1
+ * - erlang:is_map_key/2
* - erlang:map_size/1
+ * - erlang:map_get/2
*
* - maps:find/2
* - maps:from_list/1
@@ -91,7 +93,6 @@ static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_
static Export hashmap_merge_trap_export;
static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1);
static Uint hashmap_subtree_size(Eterm node);
-static Eterm hashmap_to_list(Process *p, Eterm map);
static Eterm hashmap_keys(Process *p, Eterm map);
static Eterm hashmap_values(Process *p, Eterm map);
static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value);
@@ -139,35 +140,6 @@ BIF_RETTYPE map_size_1(BIF_ALIST_1) {
BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:to_list/1 */
-
-BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) {
- if (is_flatmap(BIF_ARG_1)) {
- Uint n;
- Eterm* hp;
- Eterm *ks,*vs, res, tup;
- flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1);
-
- ks = flatmap_get_keys(mp);
- vs = flatmap_get_values(mp);
- n = flatmap_get_size(mp);
- hp = HAlloc(BIF_P, (2 + 3) * n);
- res = NIL;
-
- while(n--) {
- tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
- res = CONS(hp, tup, res); hp += 2;
- }
-
- BIF_RET(res);
- } else if (is_hashmap(BIF_ARG_1)) {
- return hashmap_to_list(BIF_P, BIF_ARG_1);
- }
-
- BIF_P->fvalue = BIF_ARG_1;
- BIF_ERROR(BIF_P, BADMAP);
-}
-
/* maps:find/2
* return value if key *matches* a key in the map
*/
@@ -232,7 +204,7 @@ BIF_RETTYPE maps_find_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
-/* maps:get/2
+/* maps:get/2 and erlang:map_get/2
* return value if key *matches* a key in the map
* exception badkey if none matches
*/
@@ -253,6 +225,10 @@ BIF_RETTYPE maps_get_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
+BIF_RETTYPE map_get_2(BIF_ALIST_2) {
+ BIF_RET(maps_get_2(BIF_CALL_ARGS));
+}
+
/* maps:from_list/1
* List may be unsorted [{K,V}]
*/
@@ -520,7 +496,9 @@ Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks0, Eterm *vs0,
sys_memcpy(ks, ks0, n * sizeof(Eterm));
sys_memcpy(vs, vs0, n * sizeof(Eterm));
- erts_validate_and_sort_flatmap(mp);
+ if (!erts_validate_and_sort_flatmap(mp)) {
+ return THE_NON_VALUE;
+ }
return make_flatmap(mp);
} else {
@@ -942,7 +920,7 @@ static int hxnodecmp(hxnode_t *a, hxnode_t *b) {
return -1;
}
-/* maps:is_key/2 */
+/* maps:is_key/2 and erlang:is_map_key/2 */
BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
@@ -952,6 +930,10 @@ BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
+BIF_RETTYPE is_map_key_2(BIF_ALIST_2) {
+ BIF_RET(maps_is_key_2(BIF_CALL_ARGS));
+}
+
/* maps:keys/1 */
BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
@@ -1188,16 +1170,17 @@ typedef struct HashmapMergeContext_ {
#endif
} HashmapMergeContext;
-static void hashmap_merge_ctx_destructor(Binary* ctx_bin)
+static int hashmap_merge_ctx_destructor(Binary* ctx_bin)
{
HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
PSTACK_DESTROY_SAVED(&ctx->pstack);
+ return 1;
}
BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) {
- Binary* ctx_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary* ctx_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
@@ -1453,9 +1436,9 @@ trap: /* Yield */
hashmap_merge_ctx_destructor);
ctx = ERTS_MAGIC_BIN_DATA(ctx_b);
sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
ASSERT(ctx->trap_bin == THE_NON_VALUE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), ctx_b);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), ctx_b);
erts_set_gc_state(p, 0);
}
@@ -1916,38 +1899,31 @@ BIF_RETTYPE maps_values_1(BIF_ALIST_1) {
BIF_ERROR(BIF_P, BADMAP);
}
-static Eterm hashmap_to_list(Process *p, Eterm node) {
- DECLARE_WSTACK(stack);
- Eterm *hp, *kv;
- Eterm res = NIL;
-
- hp = HAlloc(p, hashmap_size(node) * (2 + 3));
- hashmap_iterator_init(&stack, node, 0);
- while ((kv=hashmap_iterator_next(&stack)) != NULL) {
- Eterm tup = TUPLE2(hp, CAR(kv), CDR(kv));
- hp += 3;
- res = CONS(hp, tup, res);
- hp += 2;
- }
- DESTROY_WSTACK(stack);
- return res;
-}
-
-void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) {
- Eterm hdr = *hashmap_val(node);
+static ERTS_INLINE
+Uint hashmap_node_size(Eterm hdr, Eterm **nodep)
+{
Uint sz;
switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
case HAMT_SUBTAG_HEAD_ARRAY:
sz = 16;
+ if (nodep) ++*nodep;
break;
case HAMT_SUBTAG_HEAD_BITMAP:
+ if (nodep) ++*nodep;
+ /* fall through */
case HAMT_SUBTAG_NODE_BITMAP:
sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ ASSERT(sz < 17);
break;
default:
erts_exit(ERTS_ABORT_EXIT, "bad header");
}
+ return sz;
+}
+
+void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) {
+ Eterm hdr = *hashmap_val(node);
+ Uint sz = hashmap_node_size(hdr, NULL);
WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */
(UWord)(!reverse ? 0 : sz+1),
@@ -1971,20 +1947,7 @@ Eterm* hashmap_iterator_next(ErtsWStack* s) {
ptr = boxed_val(node);
hdr = *ptr;
ASSERT(is_header(hdr));
- switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY:
- ptr++;
- sz = 16;
- break;
- case HAMT_SUBTAG_HEAD_BITMAP:
- ptr++;
- case HAMT_SUBTAG_NODE_BITMAP:
- sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
- ASSERT(sz < 17);
- break;
- default:
- erts_exit(ERTS_ABORT_EXIT, "bad header");
- }
+ sz = hashmap_node_size(hdr, &ptr);
idx++;
@@ -2021,20 +1984,7 @@ Eterm* hashmap_iterator_prev(ErtsWStack* s) {
ptr = boxed_val(node);
hdr = *ptr;
ASSERT(is_header(hdr));
- switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY:
- ptr++;
- sz = 16;
- break;
- case HAMT_SUBTAG_HEAD_BITMAP:
- ptr++;
- case HAMT_SUBTAG_NODE_BITMAP:
- sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
- ASSERT(sz < 17);
- break;
- default:
- erts_exit(ERTS_ERROR_EXIT, "bad header");
- }
+ sz = hashmap_node_size(hdr, &ptr);
if (idx > sz)
idx = sz;
@@ -3008,6 +2958,379 @@ static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[])
}
+/**
+ * In hashmap the Path is a bit pattern that describes
+ * which slot we should traverse in each hashmap node.
+ * Since each hashmap node can only be up to 16 elements
+ * large we use 4 bits per level in the path.
+ *
+ * So a Path with value 0x110 will first get the 0:th
+ * slot in the head node, and then the 1:st slot in the
+ * resulting node and then finally the 1:st slot in the
+ * node beneath. If that slot is not a leaf, then the path
+ * continues down the 0:th slot until it finds a leaf.
+ *
+ * Once the leaf has been found, the return value is created
+ * by traversing the tree using the stack that was built
+ * when searching for the first leaf to return.
+ *
+ * The index can become a bignum, which complicates the code
+ * a bit. However, this should be very rare in practice, even
+ * on a 32-bit system, as it would require a tree of depth
+ * 7 or more.
+ *
+ * If the number of elements remaining in the map is greater
+ * than how many we want to return, we build a new Path, using
+ * the stack, that points to the next leaf.
+ *
+ * The third argument to this function controls how the data
+ * is returned.
+ *
+ * iterator: The key-value associations are to be used by
+ * maps:iterator. The return has this format:
+ * {K1,V1,{K2,V2,none | [Path | Map]}}
+ * this makes the maps:next function very simple
+ * and performant.
+ *
+ * list(): The key-value associations are to be used by
+ * maps:to_list. The return has this format:
+ * [Path, Map | [{K1,V1},{K2,V2} | BIF_ARG_3]]
+ * or if no more associations remain
+ * [{K1,V1},{K2,V2} | BIF_ARG_3]
+ */
+
+#define PATH_ELEM_SIZE 4
+#define PATH_ELEM_MASK 0xf
+#define PATH_ELEM(PATH) ((PATH) & PATH_ELEM_MASK)
+#define PATH_ELEMS_PER_DIGIT (sizeof(ErtsDigit) * 8 / PATH_ELEM_SIZE)
+
+BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
+
+ Eterm path, map;
+ enum { iterator, list } type;
+
+ path = BIF_ARG_1;
+ map = BIF_ARG_2;
+
+ if (!is_map(map))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (BIF_ARG_3 == am_iterator) {
+ type = iterator;
+ } else if (is_nil(BIF_ARG_3) || is_list(BIF_ARG_3)) {
+ type = list;
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_flatmap(map)) {
+ Uint n;
+ Eterm *ks,*vs, res, *hp;
+ flatmap_t *mp = (flatmap_t*)flatmap_val(map);
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+ n = flatmap_get_size(mp);
+
+ if (!is_small(BIF_ARG_1) || n < unsigned_val(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (type == iterator) {
+ hp = HAlloc(BIF_P, 4 * n);
+ res = am_none;
+
+ while(n--) {
+ res = TUPLE3(hp, ks[n], vs[n], res); hp += 4;
+ }
+ } else {
+ hp = HAlloc(BIF_P, (2 + 3) * n);
+ res = BIF_ARG_3;
+
+ while(n--) {
+ Eterm tup = TUPLE2(hp, ks[n], vs[n]); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+ }
+
+ BIF_RET(res);
+ } else {
+ Uint curr_path;
+ Uint path_length = 0;
+ Uint *path_rest = NULL;
+ int i, elems, orig_elems;
+ Eterm node = map, res, *patch_ptr = NULL, *hp;
+
+ /* A stack WSTACK is used when traversing the hashmap.
+ * It contains: node, idx, sz, ptr
+ *
+ * `node` is not really needed, but it is very nice to
+ * have when debugging.
+ *
+ * `idx` always points to the next un-explored entry in
+ * a node. If there are no more un-explored entries,
+ * `idx` is equal to `sz`.
+ *
+ * `sz` is the number of elements in the node.
+ *
+ * `ptr` is a pointer to where the elements of the node begin.
+ */
+ DECLARE_WSTACK(stack);
+
+ ASSERT(is_hashmap(node));
+
+/* How many elements we return in one call depends on the number of reductions
+ * that the process has left to run. In debug we return fewer elements to test
+ * the Path implementation better.
+ *
+ * Also, when the path is 0 (i.e. for the first call) we limit the number of
+ * elements to MAP_SMALL_MAP_LIMIT in order to not use a huge amount of heap
+ * when only the first X associations in the hashmap are needed.
+ */
+#if defined(DEBUG)
+#define FCALLS_ELEMS(BIF_P) ((BIF_P->fcalls / 4) & 0xF)
+#else
+#define FCALLS_ELEMS(BIF_P) (BIF_P->fcalls / 4)
+#endif
+
+ if (MAX(FCALLS_ELEMS(BIF_P), 1) < hashmap_size(map))
+ elems = MAX(FCALLS_ELEMS(BIF_P), 1);
+ else
+ elems = hashmap_size(map);
+
+#undef FCALLS_ELEMS
+
+ if (is_small(path)) {
+ curr_path = unsigned_val(path);
+
+ if (curr_path == 0 && elems > MAP_SMALL_MAP_LIMIT) {
+ elems = MAP_SMALL_MAP_LIMIT;
+ }
+ } else if (is_big(path)) {
+ Eterm *big = big_val(path);
+ if (bignum_header_is_neg(*big))
+ BIF_ERROR(BIF_P, BADARG);
+ path_length = BIG_ARITY(big) - 1;
+ curr_path = BIG_DIGIT(big, 0);
+ path_rest = BIG_V(big) + 1;
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (type == iterator) {
+ /*
+ * Iterator uses the format {K1, V1, {K2, V2, {K3, V3, [Path | Map]}}},
+ * so each element is 4 words large.
+ * To make iteration order independent of input reductions
+ * the KV-pairs are here built in DESTRUCTIVE non-reverse order.
+ */
+ hp = HAlloc(BIF_P, 4 * elems);
+ } else {
+ /*
+ * List uses the format [Path, Map, {K3,V3}, {K2,V2}, {K1,V1} | BIF_ARG_3],
+ * so each element is 2+3 words large.
+ * To make list order independent of input reductions
+ * the KV-pairs are here built in FUNCTIONAL reverse order
+ * as this is how the list as a whole is constructed.
+ */
+ hp = HAlloc(BIF_P, (2 + 3) * elems);
+ }
+
+ orig_elems = elems;
+
+ /* First we look for the leaf to start at using the
+ path given. While doing so, we push each map node
+ and the index onto the stack to use later. */
+ for (i = 1; ; i++) {
+ Eterm *ptr = hashmap_val(node),
+ hdr = *ptr++;
+ Uint sz;
+
+ sz = hashmap_node_size(hdr, &ptr);
+
+ if (PATH_ELEM(curr_path) >= sz)
+ goto badarg;
+
+ WSTACK_PUSH4(stack, node, PATH_ELEM(curr_path)+1, sz, (UWord)ptr);
+
+ /* We have found a leaf, return it and the next X elements */
+ if (is_list(ptr[PATH_ELEM(curr_path)])) {
+ Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]);
+ if (type == iterator) {
+ res = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
+ } else {
+ Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
+ res = CONS(hp, tup, BIF_ARG_3); hp += 2;
+ }
+ elems--;
+ break;
+ }
+
+ node = ptr[PATH_ELEM(curr_path)];
+
+ curr_path >>= PATH_ELEM_SIZE;
+
+ if (i == PATH_ELEMS_PER_DIGIT) {
+ /* Switch to next bignum word if available,
+ otherwise just follow 0 path */
+ i = 0;
+ if (path_length) {
+ curr_path = *path_rest;
+ path_length--;
+ path_rest++;
+ } else {
+ curr_path = 0;
+ }
+ }
+ }
+
+ /* We traverse the hashmap and return at most `elems` elements */
+ while(1) {
+ Eterm *ptr = (Eterm*)WSTACK_POP(stack);
+ Uint sz = (Uint)WSTACK_POP(stack);
+ Uint idx = (Uint)WSTACK_POP(stack);
+ Eterm node = (Eterm)WSTACK_POP(stack);
+
+ while (idx < sz && elems != 0 && is_list(ptr[idx])) {
+ Eterm *lst = list_val(ptr[idx]);
+ if (type == iterator) {
+ *patch_ptr = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
+ } else {
+ Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+ }
+ elems--;
+ idx++;
+ }
+
+ if (elems == 0) {
+ if (idx < sz) {
+ /* There are more elements in this node to explore */
+ WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr);
+ } else {
+ /* pop stack to find the next value */
+ while (!WSTACK_ISEMPTY(stack)) {
+ Eterm *ptr = (Eterm*)WSTACK_POP(stack);
+ Uint sz = (Uint)WSTACK_POP(stack);
+ Uint idx = (Uint)WSTACK_POP(stack);
+ Eterm node = (Eterm)WSTACK_POP(stack);
+ if (idx < sz) {
+ WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr);
+ break;
+ }
+ }
+ }
+ break;
+ } else {
+ if (idx < sz) {
+ Eterm hdr;
+ /* Push next idx in current node */
+ WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr);
+
+ /* Push first idx in child node */
+ node = ptr[idx];
+ ptr = hashmap_val(ptr[idx]);
+ hdr = *ptr++;
+ sz = hashmap_node_size(hdr, &ptr);
+ WSTACK_PUSH4(stack, node, 0, sz, (UWord)ptr);
+ }
+ }
+
+ /* There are no more elements in the hashmap */
+ if (WSTACK_ISEMPTY(stack)) {
+ break;
+ }
+
+ }
+
+ if (!WSTACK_ISEMPTY(stack)) {
+ Uint depth = WSTACK_COUNT(stack) / 4 + 1;
+ /* +1 because we already have the first element in curr_path */
+ Eterm *path_digits = NULL;
+ Uint curr_path = 0;
+
+ /* If the path cannot fit in a small, we allocate a bignum */
+ if (depth >= PATH_ELEMS_PER_DIGIT) {
+ /* We need multiple ErtsDigit's to represent the path */
+ int big_size = BIG_NEED_FOR_BITS(depth * PATH_ELEM_SIZE);
+ hp = HAlloc(BIF_P, big_size);
+ hp[0] = make_pos_bignum_header(big_size - BIG_NEED_SIZE(0));
+ path_digits = hp + big_size - 1;
+ }
+
+
+ /* Pop the stack to create the complete path to the next leaf */
+ while(!WSTACK_ISEMPTY(stack)) {
+ Uint idx;
+
+ (void)WSTACK_POP(stack);
+ (void)WSTACK_POP(stack);
+ idx = (Uint)WSTACK_POP(stack)-1;
+ /* idx - 1 because idx in the stack is pointing to
+ the next element to fetch. */
+ (void)WSTACK_POP(stack);
+
+ depth--;
+ if (depth % PATH_ELEMS_PER_DIGIT == 0) {
+ /* Switch to next bignum element */
+ path_digits[0] = curr_path;
+ path_digits--;
+ curr_path = 0;
+ }
+
+ curr_path <<= PATH_ELEM_SIZE;
+ curr_path |= idx;
+ }
+
+ if (path_digits) {
+ path_digits[0] = curr_path;
+ path = make_big(hp);
+ } else {
+ /* The Uint could be too large for a small */
+ path = erts_make_integer(curr_path, BIF_P);
+ }
+
+ if (type == iterator) {
+ hp = HAlloc(BIF_P, 2);
+ *patch_ptr = CONS(hp, path, map); hp += 2;
+ } else {
+ hp = HAlloc(BIF_P, 4);
+ res = CONS(hp, map, res); hp += 2;
+ res = CONS(hp, path, res); hp += 2;
+ }
+ } else {
+ if (type == iterator) {
+ *patch_ptr = am_none;
+ HRelease(BIF_P, hp + 4 * elems, hp);
+ } else {
+ HRelease(BIF_P, hp + (2+3) * elems, hp);
+ }
+ }
+ BIF_P->fcalls -= 4 * (orig_elems - elems);
+ DESTROY_WSTACK(stack);
+ BIF_RET(res);
+
+ badarg:
+ if (type == iterator) {
+ HRelease(BIF_P, hp + 4 * elems, hp);
+ } else {
+ HRelease(BIF_P, hp + (2+3) * elems, hp);
+ }
+ BIF_P->fcalls -= 4 * (orig_elems - elems);
+ DESTROY_WSTACK(stack);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
/* implementation of builtin emulations */
#if !ERTS_AT_LEAST_GCC_VSN__(3, 4, 0)
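
The path encoding described above packs one 4-bit slot index per tree level, least-significant nibble first, spilling into bignum digits when a single word runs out. A self-contained sketch of the small-path decoding step, in plain C with no ERTS types:

    #include <stdio.h>

    #define PATH_ELEM_SIZE 4
    #define PATH_ELEM_MASK 0xf
    #define PATH_ELEM(p)   ((p) & PATH_ELEM_MASK)

    /* Consumes one nibble per level, least significant first, the same way
     * the traversal loop in erts_internal_map_next_3 does for small paths. */
    static void print_path(unsigned long path, int levels) {
        int level;
        for (level = 0; level < levels; level++) {
            printf("level %d -> slot %lu\n", level, PATH_ELEM(path));
            path >>= PATH_ELEM_SIZE;
        }
    }

print_path(0x110, 3) visits slot 0, then slot 1, then slot 1, matching the 0x110 walk in the comment above.
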
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
index 61a841f7f0..718d400e22 100644
--- a/erts/emulator/beam/erl_map.h
+++ b/erts/emulator/beam/erl_map.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,14 +57,13 @@ typedef struct flatmap_s {
#define hashmap_size(x) (((hashmap_head_t*) hashmap_val(x))->size)
-#define hashmap_make_hash(Key) make_internal_hash(Key)
+#define hashmap_make_hash(Key) make_internal_hash(Key, 0)
#define hashmap_restore_hash(Heap,Lvl,Key) \
(((Lvl) < 8) ? hashmap_make_hash(Key) >> (4*(Lvl)) : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), (Key))) >> (4*((Lvl) & 7)))
#define hashmap_shift_hash(Heap,Hx,Lvl,Key) \
(((++(Lvl)) & 7) ? (Hx) >> 4 : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), Key)))
-
/* erl_term.h stuff */
#define flatmap_get_values(x) (((Eterm *)(x)) + sizeof(flatmap_t)/sizeof(Eterm))
#define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1)
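
The updated hash macros consume 4 hash bits per level; once the 32-bit hash is exhausted after 8 levels, hashmap_restore_hash derives a fresh hash from the key consed together with the level counter. A plain-C sketch of that extension idea; hash32 is a stand-in for make_internal_hash and is assumed rather than provided:

    #include <stdint.h>

    /* Stand-in for make_internal_hash(Key, Salt): any seeded 32-bit hash. */
    extern uint32_t hash32(uint32_t seed, const void *key);

    /* Returns the 4-bit slot index for a given level, starting a fresh hash
     * "epoch" every 8 levels, analogous to hashmap_restore_hash. */
    static unsigned slot_for_level(const void *key, unsigned level) {
        uint32_t h = hash32(level >> 3, key); /* epoch = level / 8 */
        return (h >> (4 * (level & 7))) & 0xf;
    }

The real macros rehash CONS(make_small(Lvl>>3), Key) rather than seeding the hash, but the idea is the same: an effectively unbounded hash stream produced 32 bits at a time.
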
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
index fc0aaed18a..1f270eb55f 100644
--- a/erts/emulator/beam/erl_math.c
+++ b/erts/emulator/beam/erl_math.c
@@ -247,6 +247,17 @@ BIF_RETTYPE math_pow_2(BIF_ALIST_2)
return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2);
}
+BIF_RETTYPE math_ceil_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, ceil, BIF_ARG_1);
+}
+BIF_RETTYPE math_floor_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, floor, BIF_ARG_1);
+}
-
+BIF_RETTYPE math_fmod_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, fmod, BIF_ARG_1, BIF_ARG_2);
+}
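
The new BIFs above follow the file's one-liner pattern: math_call_1/math_call_2 unbox the float argument(s), call the libm routine, and box the result (or raise an arithmetic error). Extending the set is mechanical; a hypothetical log2 wrapper, shown only to illustrate the pattern and not part of this diff (a matching bif.tab entry would also be required):

    #include <math.h>

    BIF_RETTYPE math_log2_1(BIF_ALIST_1)
    {
        return math_call_1(BIF_P, log2, BIF_ARG_1);
    }
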
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index e4c696ae3b..a3274d7443 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@
#include "erl_binary.h"
#include "dtrace-wrapper.h"
#include "beam_bp.h"
+#include "erl_proc_sig_queue.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message_ref,
ErtsMessageRef,
@@ -167,15 +168,17 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
erts_erase_fun_entry(u.fun->fe);
}
break;
+ case REF_SUBTAG:
+ ASSERT(is_magic_ref_thing(u.hdr));
+ erts_bin_release((Binary *)u.mref->mb);
+ break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
erts_deref_node_entry(u.ext->node);
@@ -193,7 +196,7 @@ free_message_buffer(ErlHeapFragment* bp)
erts_cleanup_offheap(&bp->off_heap);
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
bp = next_bp;
}while (bp != NULL);
}
@@ -205,7 +208,7 @@ erts_cleanup_messages(ErtsMessage *msgp)
while (mp) {
ErtsMessage *fmp;
ErlHeapFragment *bp;
- if (is_non_value(ERL_MESSAGE_TERM(mp))) {
+ if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
if (is_not_immed(ERL_MESSAGE_TOKEN(mp))) {
bp = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
erts_cleanup_offheap(&bp->off_heap);
@@ -213,10 +216,13 @@ erts_cleanup_messages(ErtsMessage *msgp)
if (mp->data.dist_ext)
erts_free_dist_ext_copy(mp->data.dist_ext);
}
- else {
- if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG)
+ else {
+ if (ERTS_SIG_IS_INTERNAL_MSG(mp)
+ && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
bp = mp->data.heap_frag;
+ }
else {
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
bp = mp->hfrag.next;
erts_cleanup_offheap(&mp->hfrag.off_heap);
}
@@ -258,20 +264,14 @@ erts_queue_dist_message(Process *rcvr,
Eterm from)
{
ErtsMessage* mp;
-#ifdef USE_VM_PROBES
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-#endif
-#ifdef ERTS_SMP
erts_aint_t state;
-#endif
- ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+ ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
mp = erts_alloc_message(0, NULL);
mp->data.dist_ext = dist_ext;
+ ERL_MESSAGE_FROM(mp) = dist_ext->dep->sysname;
ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
#ifdef USE_VM_PROBES
ERL_MESSAGE_DT_UTAG(mp) = NIL;
@@ -281,247 +281,182 @@ erts_queue_dist_message(Process *rcvr,
#endif
ERL_MESSAGE_TOKEN(mp) = token;
-#ifdef ERTS_SMP
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (rcvr_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ ErtsProcLocks unlocks =
+ rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
+ if (unlocks) {
+ erts_proc_unlock(rcvr, unlocks);
+ need_locks |= unlocks;
}
- erts_smp_proc_lock(rcvr, need_locks);
+ erts_proc_lock(rcvr, need_locks);
}
}
- state = erts_smp_atomic32_read_acqb(&rcvr->state);
- if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
+
+ state = erts_atomic32_read_acqb(&rcvr->state);
+ if (state & ERTS_PSFLG_EXITING) {
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
/* Drop message if receiver is exiting or has a pending exit ... */
erts_cleanup_messages(mp);
}
- else
-#endif
- if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
- if (from == am_Empty)
- from = dist_ext->dep->sysname;
-
- /* Ahh... need to decode it in order to trace it... */
- if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
- erts_free_message(mp);
- else {
- Eterm msg = ERL_MESSAGE_TERM(mp);
- token = ERL_MESSAGE_TOKEN(mp);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
-
- dtrace_proc_str(rcvr, receiver_name);
- if (have_seqtrace(token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
- }
- DTRACE6(message_queued,
- receiver_name, size_object(msg), rcvr->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
-#endif
- erts_queue_message(rcvr, rcvr_locks, mp, msg, from);
- }
- }
else {
- /* Enqueue message on external format */
-
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
-
- dtrace_proc_str(rcvr, receiver_name);
- if (have_seqtrace(token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
- }
- /*
- * TODO: We don't know the real size of the external message here.
- * -1 will appear to a D script as 4294967295.
- */
- DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
- tok_label, tok_lastcnt, tok_serial);
- }
-#endif
+ LINK_MESSAGE(rcvr, mp);
- LINK_MESSAGE(rcvr, mp, &mp->next, 1);
+ if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(rcvr);
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
- erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr,
-#ifdef ERTS_SMP
- rcvr_locks
-#else
- 0
-#endif
- );
+ erts_proc_notify_new_message(rcvr, rcvr_locks);
}
}
/* Add messages last in message queue */
-static Sint
+static void
queue_messages(Process* receiver,
- erts_aint32_t *receiver_state,
ErtsProcLocks receiver_locks,
ErtsMessage* first,
ErtsMessage** last,
- Uint len,
- Eterm from)
+ Uint len)
{
- ErtsTracingEvent* te;
- Sint res;
int locked_msgq = 0;
erts_aint32_t state;
- ASSERT(is_value(ERL_MESSAGE_TERM(first)));
- ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
- ERL_MESSAGE_TOKEN(first) == NIL ||
- is_tuple(ERL_MESSAGE_TOKEN(first)));
-
-#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_CHECK
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
- receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#ifdef DEBUG
+ {
+ ErtsMessage* fmsg = ERTS_SIG_IS_MSG(first) ? first : first->next;
+ ASSERT(fmsg);
+ ASSERT(is_value(ERL_MESSAGE_TERM(fmsg)));
+ ASSERT(is_value(ERL_MESSAGE_FROM(fmsg)));
+ ASSERT(ERL_MESSAGE_TOKEN(fmsg) == am_undefined ||
+ ERL_MESSAGE_TOKEN(fmsg) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(fmsg)));
+ }
#endif
- if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks;
-
- if (receiver_state)
- state = *receiver_state;
- else
- state = erts_smp_atomic32_read_nob(&receiver->state);
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
- goto exiting;
+ ERTS_LC_ASSERT((erts_proc_lc_my_proc_locks(receiver) & ERTS_PROC_LOCK_MSGQ)
+ == (receiver_locks & ERTS_PROC_LOCK_MSGQ));
- need_locks = receiver_locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- if (need_locks) {
- erts_smp_proc_unlock(receiver, need_locks);
- }
- need_locks |= ERTS_PROC_LOCK_MSGQ;
- erts_smp_proc_lock(receiver, need_locks);
- }
+ if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ erts_proc_lock(receiver, ERTS_PROC_LOCK_MSGQ);
locked_msgq = 1;
}
-#endif
-
- state = erts_smp_atomic32_read_nob(&receiver->state);
+ state = erts_atomic32_read_nob(&receiver->state);
- if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
-#ifdef ERTS_SMP
- exiting:
-#endif
+ if (state & ERTS_PSFLG_EXITING) {
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ ErtsSchedulerData* esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(!esdp->pending_signal.sig);
+ esdp->pending_signal.sig = (ErtsSignal*) first;
+ esdp->pending_signal.to = receiver->common.id;
+ first = first->next;
+ }
erts_cleanup_messages(first);
- return 0;
+ return;
}
- res = receiver->msg.len;
-#ifdef ERTS_SMP
- if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
- /*
- * We move 'in queue' to 'private queue' and place
- * message at the end of 'private queue' in order
- * to ensure that the 'in queue' doesn't contain
- * references into the heap. By ensuring this,
- * we don't need to include the 'in queue' in
- * the root set when garbage collecting.
- */
- res += receiver->msg_inq.len;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
- LINK_MESSAGE_PRIVQ(receiver, first, last, len);
+ if (last == &first->next) {
+ ASSERT(len == 1);
+ LINK_MESSAGE(receiver, first);
}
- else
-#endif
- {
- LINK_MESSAGE(receiver, first, last, len);
+ else {
+ erts_enqueue_signals(receiver, first, last, NULL, len, state);
}
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
- && (te = &erts_receive_tracing[erts_active_bp_ix()],
- te->on)) {
-
- ErtsMessage *msg = first;
-
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
- Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
-
- dtrace_proc_str(receiver, receiver_name);
- if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
- }
- DTRACE6(message_queued,
- receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
- receiver->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
-#endif
- while (msg) {
- trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
- msg = msg->next;
- }
+ if (receiver_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(receiver);
- }
if (locked_msgq) {
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
-#ifdef ERTS_SMP
- erts_proc_notify_new_message(receiver, receiver_locks);
-#else
- erts_proc_notify_new_message(receiver, 0);
-#endif
- return res;
+ if (last == &first->next)
+ erts_proc_notify_new_message(receiver, receiver_locks);
+ else
+ erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE);
}
-static Sint
-queue_message(Process* receiver,
- erts_aint32_t *receiver_state,
- ErtsProcLocks receiver_locks,
- ErtsMessage* mp, Eterm msg, Eterm from)
+static ERTS_INLINE
+ErtsMessage* prepend_pending_sig_maybe(Process* sender, Process* receiver,
+ ErtsMessage* mp)
{
- ERL_MESSAGE_TERM(mp) = msg;
- return queue_messages(receiver, receiver_state, receiver_locks,
- mp, &mp->next, 1, from);
+ ErtsSchedulerData* esdp = sender->scheduler_data;
+ ErtsSignal* pend_sig;
+
+ if (!esdp || esdp->pending_signal.to != receiver->common.id)
+ return mp;
+
+ pend_sig = esdp->pending_signal.sig;
+
+ ASSERT(esdp->pending_signal.dbg_from == sender);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+ pend_sig->common.next = mp;
+ pend_sig->common.specific.next = NULL;
+ return (ErtsMessage*) pend_sig;
}
-Sint
+/**
+ * @brief Send one message that does *NOT* originate from a local process.
+ *
+ * seq_trace does not work with this type of message, so the
+ * token is set to am_undefined, which means that the receiving
+ * process will not remove the seq_trace token when it gets
+ * this message.
+ */
+void
erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
ErtsMessage* mp, Eterm msg, Eterm from)
{
- return queue_message(receiver, NULL, receiver_locks, mp, msg, from);
+ ASSERT(is_not_internal_pid(from));
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = from;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ queue_messages(receiver, receiver_locks, mp, &mp->next, 1);
+}
+
+/**
+ * @brief Send one message from a local process.
+ *
+ * It is up to the caller of this function to set the
+ * correct seq_trace. The general rule of thumb is that
+ * it should be set to am_undefined if the message
+ * cannot be traced using seq_trace; if it can be
+ * traced, it should be set to the trace token. It should
+ * very rarely be explicitly set to NIL!
+ */
+void
+erts_queue_proc_message(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg)
+{
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = sender->common.id;
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, mp),
+ &mp->next, 1);
}
-Sint
-erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks,
- ErtsMessage* first, ErtsMessage** last, Uint len,
- Eterm from)
+void
+erts_queue_proc_messages(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len)
{
- return queue_messages(receiver, NULL, receiver_locks,
- first, last, len, from);
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, first),
+ last, len);
}
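
For orientation, a minimal sketch (not part of this diff; names and context are hypothetical) of how a caller might use the two new entry points, assuming the usual ERTS headers and that the locks described by rcvr_locks are held:

/* Hypothetical ERTS-internal helper: deliver the atom 'ok' to
 * `receiver`, from a local process if `sender` is non-NULL. */
static void
send_ok_sketch(Process *sender, Process *receiver, ErtsProcLocks rcvr_locks)
{
    Eterm *hp;
    ErlOffHeap *ohp;
    /* Size 0: an immediate term needs no heap space in the message. */
    ErtsMessage *mp = erts_alloc_message_heap(receiver, &rcvr_locks,
                                              0, &hp, &ohp);
    (void) hp; (void) ohp;

    if (sender) {
        /* Local sender: FROM becomes sender->common.id; the caller
         * is responsible for setting the seq_trace token. */
        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        erts_queue_proc_message(sender, receiver, rcvr_locks, mp, am_ok);
    }
    else {
        /* No local sender: seq_trace cannot apply, and FROM must
         * not be an internal pid. */
        erts_queue_message(receiver, rcvr_locks, mp, am_ok, am_system);
    }
}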
void
@@ -564,14 +499,11 @@ erts_msg_attached_data_size_aux(ErtsMessage *msg)
sz = erts_decode_dist_ext_size(msg->data.dist_ext);
if (sz < 0) {
- /* Bad external; remove it */
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
- ErlHeapFragment *heap_frag;
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(msg->data.dist_ext);
- msg->data.dist_ext = NULL;
+ /* Bad external
+ * We leave the message intact in this case as it's not worth the trouble
+ * to make all callers remove it from the queue. It will be detected again
+ * and removed from the message queue later anyway.
+ */
return 0;
}
@@ -593,9 +525,7 @@ erts_try_alloc_message_on_heap(Process *pp,
ErlOffHeap **ohpp,
int *on_heap_p)
{
-#ifdef ERTS_SMP
int locked_main = 0;
-#endif
ErtsMessage *mp;
ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
@@ -603,15 +533,9 @@ erts_try_alloc_message_on_heap(Process *pp,
if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
goto in_message_fragment;
else if (
-#if defined(ERTS_SMP)
*plp & ERTS_PROC_LOCK_MAIN
-#else
- pp
-#endif
) {
-#ifdef ERTS_SMP
try_on_heap:
-#endif
if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
|| (pp->flags & F_DISABLE_GC)
|| HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
@@ -619,12 +543,10 @@ erts_try_alloc_message_on_heap(Process *pp,
* The heap is either potentially in an inconsistent
* state, or not large enough.
*/
-#ifdef ERTS_SMP
if (locked_main) {
*plp &= ~ERTS_PROC_LOCK_MAIN;
- erts_smp_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
}
-#endif
goto in_message_fragment;
}
@@ -635,17 +557,15 @@ erts_try_alloc_message_on_heap(Process *pp,
mp->data.attached = NULL;
*on_heap_p = !0;
}
-#ifdef ERTS_SMP
- else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
+ else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
locked_main = 1;
- *psp = erts_smp_atomic32_read_nob(&pp->state);
+ *psp = erts_atomic32_read_nob(&pp->state);
*plp |= ERTS_PROC_LOCK_MAIN;
goto try_on_heap;
}
-#endif
else {
in_message_fragment:
- if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
+ if ((*psp) & ERTS_PSFLG_OFF_HEAP_MSGQ) {
mp = erts_alloc_message(sz, hpp);
*ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
}
@@ -673,18 +593,16 @@ erts_try_alloc_message_on_heap(Process *pp,
* Send a local message when sender & receiver processes are known.
*/
-Sint
+void
erts_send_message(Process* sender,
Process* receiver,
ErtsProcLocks *receiver_locks,
- Eterm message,
- unsigned flags)
+ Eterm message)
{
Uint msize;
ErtsMessage* mp;
ErlOffHeap *ohp;
Eterm token = NIL;
- Sint res = 0;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(sender_name, 64);
DTRACE_CHARBUF(receiver_name, 64);
@@ -696,6 +614,9 @@ erts_send_message(Process* sender,
erts_aint32_t receiver_state;
#ifdef SHCOPY_SEND
erts_shcopy_t info;
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
#ifdef USE_VM_PROBES
@@ -708,9 +629,9 @@ erts_send_message(Process* sender,
}
#endif
- receiver_state = erts_smp_atomic32_read_nob(&receiver->state);
+ receiver_state = erts_atomic32_read_nob(&receiver->state);
- if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
+ if (SEQ_TRACE_TOKEN(sender) != NIL) {
Eterm* hp;
Eterm stoken = SEQ_TRACE_TOKEN(sender);
Uint seq_trace_size = 0;
@@ -725,9 +646,10 @@ erts_send_message(Process* sender,
*/
if (have_seqtrace(stoken)) {
seq_trace_update_send(sender);
- seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
receiver->common.id, sender);
- seq_trace_size = 6; /* TUPLE5 */
+
+ seq_trace_size = size_object(stoken);
}
#ifdef USE_VM_PROBES
if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
@@ -741,7 +663,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -760,7 +682,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
if (is_immed(stoken))
token = stoken;
@@ -776,7 +698,7 @@ erts_send_message(Process* sender,
}
if (DTRACE_ENABLED(message_send)) {
if (have_seqtrace(stoken)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
}
@@ -796,7 +718,7 @@ erts_send_message(Process* sender,
INITIALIZE_SHCOPY(info);
msize = copy_shared_calculate(message, &info);
#else
- msize = size_object(message);
+ msize = size_object_litopt(message, &litarea);
#endif
mp = erts_alloc_message_heap_state(receiver,
&receiver_state,
@@ -810,7 +732,7 @@ erts_send_message(Process* sender,
DESTROY_SHCOPY(info);
#else
if (is_not_immed(message))
- message = copy_struct(message, msize, &hp, ohp);
+ message = copy_struct_litopt(message, msize, &hp, ohp, &litarea);
#endif
}
#ifdef USE_VM_PROBES
@@ -823,13 +745,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
- res = queue_message(receiver,
- &receiver_state,
- *receiver_locks,
- mp, message,
- sender->common.id);
- return res;
+ erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);
}
@@ -948,70 +865,77 @@ Sint
erts_move_messages_off_heap(Process *c_p)
{
int reds = 1;
+ int i;
+ ErtsMessage *msgq[] = {c_p->sig_qs.first, c_p->sig_qs.cont};
/*
* Move all messages off heap. This *only* occurs when the
* process had off heap message disabled and just enabled
* it...
*/
- ErtsMessage *mp;
- reds += c_p->msg.len / 10;
+ reds += erts_proc_sig_privqs_len(c_p) / 10;
- ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ);
ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
- for (mp = c_p->msg.first; mp; mp = mp->next) {
- Uint msg_sz, token_sz;
+ for (i = 0; i < sizeof(msgq)/sizeof(msgq[0]); i++) {
+ ErtsMessage *mp;
+ for (mp = msgq[i]; mp; mp = mp->next) {
+ Uint msg_sz, token_sz;
#ifdef USE_VM_PROBES
- Uint utag_sz;
+ Uint utag_sz;
#endif
- Eterm *hp;
- ErlHeapFragment *hfrag;
+ Eterm *hp;
+ ErlHeapFragment *hfrag;
- if (mp->data.attached)
- continue;
+ if (!ERTS_SIG_IS_INTERNAL_MSG(mp))
+ continue;
- if (is_immed(ERL_MESSAGE_TERM(mp))
+ if (mp->data.attached)
+ continue;
+
+ if (is_immed(ERL_MESSAGE_TERM(mp))
#ifdef USE_VM_PROBES
- && is_immed(ERL_MESSAGE_DT_UTAG(mp))
+ && is_immed(ERL_MESSAGE_DT_UTAG(mp))
#endif
- && is_not_immed(ERL_MESSAGE_TOKEN(mp)))
- continue;
+ && is_not_immed(ERL_MESSAGE_TOKEN(mp)))
+ continue;
- /*
- * The message refers into the heap. Copy the message
- * from the heap into a heap fragment and attach
- * it to the message...
- */
- msg_sz = size_object(ERL_MESSAGE_TERM(mp));
+ /*
+ * The message refers into the heap. Copy the message
+ * from the heap into a heap fragment and attach
+ * it to the message...
+ */
+ msg_sz = size_object(ERL_MESSAGE_TERM(mp));
#ifdef USE_VM_PROBES
- utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
+ utag_sz = size_object(ERL_MESSAGE_DT_UTAG(mp));
#endif
- token_sz = size_object(ERL_MESSAGE_TOKEN(mp));
+ token_sz = size_object(ERL_MESSAGE_TOKEN(mp));
- hfrag = new_message_buffer(msg_sz
+ hfrag = new_message_buffer(msg_sz
#ifdef USE_VM_PROBES
- + utag_sz
+ + utag_sz
#endif
- + token_sz);
- hp = hfrag->mem;
- if (is_not_immed(ERL_MESSAGE_TERM(mp)))
- ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
- msg_sz, &hp,
- &hfrag->off_heap);
- if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
- ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
- token_sz, &hp,
- &hfrag->off_heap);
+ + token_sz);
+ hp = hfrag->mem;
+ if (is_not_immed(ERL_MESSAGE_TERM(mp)))
+ ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp),
+ msg_sz, &hp,
+ &hfrag->off_heap);
+ if (is_not_immed(ERL_MESSAGE_TOKEN(mp)))
+ ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp),
+ token_sz, &hp,
+ &hfrag->off_heap);
#ifdef USE_VM_PROBES
- if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
- ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
- utag_sz, &hp,
- &hfrag->off_heap);
+ if (is_not_immed(ERL_MESSAGE_DT_UTAG(mp)))
+ ERL_MESSAGE_DT_UTAG(mp) = copy_struct(ERL_MESSAGE_DT_UTAG(mp),
+ utag_sz, &hp,
+ &hfrag->off_heap);
#endif
- mp->data.heap_frag = hfrag;
- reds += 1;
+ mp->data.heap_frag = hfrag;
+ reds += 1;
+ }
}
return reds;
@@ -1022,9 +946,9 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
{
int reds = 1;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
- ASSERT(erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
+ ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
/*
* This job was first initiated when the process changed to off heap
@@ -1036,13 +960,13 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
*/
if (!(c_p->flags & F_OFF_HEAP_MSGQ))
- erts_smp_atomic32_read_band_nob(&c_p->state,
+ erts_atomic32_read_band_nob(&c_p->state,
~ERTS_PSFLG_OFF_HEAP_MSGQ);
else {
reds += 2;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
reds += erts_move_messages_off_heap(c_p);
}
c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
@@ -1079,16 +1003,16 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
#ifdef DEBUG
if (c_p->flags & F_OFF_HEAP_MSGQ) {
- ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ);
}
else {
if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
- ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+ ASSERT(erts_atomic32_read_nob(&c_p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ);
}
else {
- ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state)
+ ASSERT(!(erts_atomic32_read_nob(&c_p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ));
}
}
@@ -1105,8 +1029,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
case am_on_heap:
c_p->flags |= F_ON_HEAP_MSGQ;
c_p->flags &= ~F_OFF_HEAP_MSGQ;
- erts_smp_atomic32_read_bor_nob(&c_p->state,
- ERTS_PSFLG_ON_HEAP_MSGQ);
/*
* We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
* if a off heap change is ongoing. It will be adjusted
@@ -1114,7 +1036,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
*/
if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
/* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
- erts_smp_atomic32_read_band_nob(&c_p->state,
+ erts_atomic32_read_band_nob(&c_p->state,
~ERTS_PSFLG_OFF_HEAP_MSGQ);
}
break;
@@ -1132,8 +1054,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
break;
case am_off_heap:
c_p->flags &= ~F_ON_HEAP_MSGQ;
- erts_smp_atomic32_read_band_nob(&c_p->state,
- ~ERTS_PSFLG_ON_HEAP_MSGQ);
goto change_to_off_heap;
default:
res = THE_NON_VALUE; /* badarg */
@@ -1167,7 +1087,7 @@ change_to_off_heap:
* change has completed, GC does not need to inspect
* the message queue at all.
*/
- erts_smp_atomic32_read_bor_nob(&c_p->state,
+ erts_atomic32_read_bor_nob(&c_p->state,
ERTS_PSFLG_OFF_HEAP_MSGQ);
c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
@@ -1255,139 +1175,6 @@ erts_decode_dist_message(Process *proc, ErtsProcLocks proc_locks,
return 1;
}
-/*
- * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0 will move off heap messages
- * into the heap of the inspected process if off_heap_message_queue
- * is false when process_info(_, messages) is called. That is, the
- * following GC will have more data in the rootset compared to the
- * scenario when process_info(_, messages) had not been called.
- *
- * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS != 0 will keep off heap messages
- * off heap when process_info(_, messages) is called regardless of
- * the off_heap_message_queue setting of the process. That is, it
- * will change the following execution of the process as little as
- * possible.
- */
-#define ERTS_INSPECT_MSGQ_KEEP_OH_MSGS 1
-
-Uint
-erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
- ErtsProcLocks rp_locks, ErtsMessageInfo *mip)
-{
- Uint tot_heap_size;
- ErtsMessage* mp;
- Sint i;
- int self_on_heap;
-
- /*
- * Prepare the message queue for inspection
- * by process_info().
- *
- *
- * - Decode all messages on external format
- * - Remove all corrupt dist messages from queue
- * - Save pointer to, and heap size need of each
- * message in the mip array.
- * - Return total heap size need for all messages
- * that needs to be copied.
- *
- * If ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0:
- * - In case off heap messages is disabled and
- * we are inspecting our own queue, move all
- * off heap data into the heap.
- */
-
- self_on_heap = c_p == rp && !(c_p->flags & F_OFF_HEAP_MSGQ);
-
- tot_heap_size = 0;
- i = 0;
- mp = rp->msg.first;
- while (mp) {
- Eterm msg = ERL_MESSAGE_TERM(mp);
-
- mip[i].size = 0;
-
- if (is_non_value(msg)) {
- /* Dist message on external format; decode it... */
- if (mp->data.attached)
- erts_decode_dist_message(rp, rp_locks, mp,
- ERTS_INSPECT_MSGQ_KEEP_OH_MSGS);
-
- msg = ERL_MESSAGE_TERM(mp);
-
- if (is_non_value(msg)) {
- ErtsMessage **mpp;
- ErtsMessage *bad_mp = mp;
- /*
- * Bad distribution message; remove
- * it from the queue...
- */
- ASSERT(!mp->data.attached);
-
- mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
-
- ASSERT(*mpp == bad_mp);
-
- erts_msgq_update_internal_pointers(&rp->msg, mpp, &bad_mp->next);
-
- mp = mp->next;
- *mpp = mp;
- rp->msg.len--;
- bad_mp->next = NULL;
- erts_cleanup_messages(bad_mp);
- continue;
- }
- }
-
- ASSERT(is_value(msg));
-
-#if ERTS_INSPECT_MSGQ_KEEP_OH_MSGS
- if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
- Uint sz = size_object(msg);
- mip[i].size = sz;
- tot_heap_size += sz;
- }
-#else
- if (self_on_heap) {
- if (mp->data.attached) {
- ErtsMessage *tmp = NULL;
- if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
- erts_link_mbuf_to_proc(rp, mp->data.heap_frag);
- mp->data.attached = NULL;
- }
- else {
- /*
- * Need to replace the message reference since
- * we will get references to the message data
- * from the heap...
- */
- ErtsMessage **mpp;
- tmp = erts_alloc_message(0, NULL);
- sys_memcpy((void *) tmp->m, (void *) mp->m,
- sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
- mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
- erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
- erts_save_message_in_proc(rp, mp);
- mp = tmp;
- }
- }
- }
- else if (is_not_immed(msg)) {
- Uint sz = size_object(msg);
- mip[i].size = sz;
- tot_heap_size += sz;
- }
-
-#endif
-
- mip[i].msgp = mp;
- i++;
- mp = mp->next;
- }
-
- return tot_heap_size;
-}
-
void erts_factory_proc_init(ErtsHeapFactory* factory,
Process* p)
{
@@ -1448,7 +1235,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
int on_heap;
erts_aint32_t state;
- state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0;
+ state = proc ? erts_atomic32_read_nob(&proc->state) : 0;
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
msgp = erts_alloc_message(sz, &hp);
@@ -1463,7 +1250,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
}
if (on_heap) {
- ERTS_SMP_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
+ ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
ASSERT(ohp == &proc->off_heap);
factory->mode = FACTORY_HALLOC;
factory->p = proc;
@@ -1577,32 +1364,18 @@ void erts_factory_dummy_init(ErtsHeapFactory* factory)
factory->mode = FACTORY_CLOSED;
}
-static void reserve_heap(ErtsHeapFactory*, Uint need, Uint xtra);
-
-Eterm* erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
-{
- Eterm* res;
-
- ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
- if (factory->hp + need > factory->hp_end) {
- reserve_heap(factory, need, xtra);
- }
- res = factory->hp;
- factory->hp += need;
- return res;
-}
-
Eterm* erts_reserve_heap(ErtsHeapFactory* factory, Uint need)
{
ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
if (factory->hp + need > factory->hp_end) {
- reserve_heap(factory, need, 200);
+ erts_reserve_heap__(factory, need, 200);
}
return factory->hp;
}
-static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+void erts_reserve_heap__(ErtsHeapFactory* factory, Uint need, Uint xtra)
{
+ /* internal... */
ErlHeapFragment* bp;
switch (factory->mode) {
@@ -1612,7 +1385,9 @@ static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
factory->hp_end = factory->hp + need;
return;
- case FACTORY_MESSAGE:
+ case FACTORY_MESSAGE: {
+ int replace_oh;
+ int replace_msg_hfrag;
if (!factory->heap_frags) {
ASSERT(factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG);
bp = &factory->message->hfrag;
@@ -1624,25 +1399,45 @@ static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
bp = factory->heap_frags;
}
+ replace_oh = 0;
+ replace_msg_hfrag = 0;
+
if (bp) {
- ASSERT(factory->hp > bp->mem);
+ ASSERT(factory->hp >= bp->mem);
ASSERT(factory->hp <= factory->hp_end);
ASSERT(factory->hp_end == bp->mem + bp->alloc_size);
bp->used_size = factory->hp - bp->mem;
+ if (!bp->used_size && factory->heap_frags) {
+ factory->heap_frags = bp->next;
+ bp->next = NULL;
+ ASSERT(!bp->off_heap.first);
+ if (factory->off_heap == &bp->off_heap)
+ replace_oh = !0;
+ if (factory->message && factory->message->data.heap_frag == bp)
+ replace_msg_hfrag = !0;
+ free_message_buffer(bp);
+ }
}
bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(factory->alloc_type,
ERTS_HEAP_FRAG_SIZE(need+xtra));
bp->next = factory->heap_frags;
factory->heap_frags = bp;
bp->alloc_size = need + xtra;
- bp->used_size = need;
+ bp->used_size = need + xtra;
bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
-
+ if (replace_oh) {
+ factory->off_heap = &bp->off_heap;
+ factory->off_heap_saved.first = factory->off_heap->first;
+ factory->off_heap_saved.overhead = factory->off_heap->overhead;
+ }
+ if (replace_msg_hfrag)
+ factory->message->data.heap_frag = bp;
factory->hp = bp->mem;
factory->hp_end = bp->mem + bp->alloc_size;
return;
+ }
case FACTORY_STATIC:
case FACTORY_CLOSED:
@@ -1725,9 +1520,11 @@ void erts_factory_trim_and_close(ErtsHeapFactory* factory,
if (bp->next == NULL) {
Uint used_sz = factory->hp - bp->mem;
ASSERT(used_sz <= bp->alloc_size);
- if (used_sz > 0)
- bp = erts_resize_message_buffer(bp, used_sz,
- brefs, brefs_size);
+ if (used_sz > 0) {
+ if (used_sz != bp->alloc_size)
+ bp = erts_resize_message_buffer(bp, used_sz,
+ brefs, brefs_size);
+ }
else {
free_message_buffer(bp);
bp = NULL;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 42ed14e69c..b2550814fd 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,11 @@
#ifndef __ERL_MESSAGE_H__
#define __ERL_MESSAGE_H__
+#include "sys.h"
+#define ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+#include "erl_proc_sig_queue.h"
+#undef ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+
struct proc_bin;
struct external_thing_;
@@ -84,12 +89,33 @@ void erts_factory_static_init(ErtsHeapFactory*, Eterm* hp, Uint size, ErlOffHeap
void erts_factory_tmp_init(ErtsHeapFactory*, Eterm* hp, Uint size, Uint32 atype);
void erts_factory_dummy_init(ErtsHeapFactory*);
-Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+ERTS_GLB_INLINE Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+
Eterm* erts_reserve_heap(ErtsHeapFactory*, Uint need);
void erts_factory_close(ErtsHeapFactory*);
void erts_factory_trim_and_close(ErtsHeapFactory*,Eterm *brefs, Uint brefs_size);
void erts_factory_undo(ErtsHeapFactory*);
+void erts_reserve_heap__(ErtsHeapFactory*, Uint need, Uint xtra); /* internal */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm *
+erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+{
+ Eterm* res;
+
+ ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
+ if (factory->hp + need > factory->hp_end) {
+ erts_reserve_heap__(factory, need, xtra);
+ }
+ res = factory->hp;
+ factory->hp += need;
+ return res;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#ifdef CHECK_FOR_HOLES
# define ERTS_FACTORY_HOLE_CHECK(f) do { \
/*if ((f)->p) erts_check_for_holes((f)->p);*/ \
@@ -118,15 +144,16 @@ struct erl_heap_fragment {
};
/* m[0] = message, m[1] = seq trace token */
-#define ERL_MESSAGE_REF_ARRAY_SZ 2
+#define ERL_MESSAGE_REF_ARRAY_SZ 3
#define ERL_MESSAGE_TERM(mp) ((mp)->m[0])
#define ERL_MESSAGE_TOKEN(mp) ((mp)->m[1])
+#define ERL_MESSAGE_FROM(mp) ((mp)->m[2])
#ifdef USE_VM_PROBES
/* m[2] = dynamic trace user tag */
#undef ERL_MESSAGE_REF_ARRAY_SZ
-#define ERL_MESSAGE_REF_ARRAY_SZ 3
-#define ERL_MESSAGE_DT_UTAG(mp) ((mp)->m[2])
+#define ERL_MESSAGE_REF_ARRAY_SZ 4
+#define ERL_MESSAGE_DT_UTAG(mp) ((mp)->m[3])
#else
#endif
@@ -157,30 +184,124 @@ struct erl_mesg {
ErlHeapFragment hfrag;
};
+/*
+ * The ErtsMessage struct is only one special type
+ * of signal. All signal structs have a common
+ * begining and can be differentiated by looking
+ * at the ErtsSignal 'common.tag' field.
+ *
+ * - An ordinary message will have a value
+ * - A distribution message that has not been
+ * decoded yet will have the non-value.
+ * - Other signals will have an external pid
+ * header tag. In order to differentiate
+ * between those signals one needs to look
+ * at the arity part of the header (see
+ * erts_proc_sig_queue.h).
+ */
+
+#define ERTS_SIG_IS_NON_MSG_TAG(Tag) \
+ (is_external_pid_header((Tag)))
+
+#define ERTS_SIG_IS_NON_MSG(Sig) \
+ ERTS_SIG_IS_NON_MSG_TAG(((ErtsSignal *) (Sig))->common.tag)
+
+#define ERTS_SIG_IS_INTERNAL_MSG_TAG(Tag) \
+ (!is_header((Tag)))
+#define ERTS_SIG_IS_INTERNAL_MSG(Sig) \
+ ERTS_SIG_IS_INTERNAL_MSG_TAG(((ErtsSignal *) (Sig))->common.tag)
+
+#define ERTS_SIG_IS_EXTERNAL_MSG_TAG(Tag) \
+ ((Tag) == THE_NON_VALUE)
+#define ERTS_SIG_IS_EXTERNAL_MSG(Sig) \
+ ERTS_SIG_IS_EXTERNAL_MSG_TAG(((ErtsSignal *) (Sig))->common.tag)
+
+#define ERTS_SIG_IS_MSG_TAG(Tag) \
+ (!ERTS_SIG_IS_NON_MSG_TAG(Tag))
+#define ERTS_SIG_IS_MSG(Sig) \
+ ERTS_SIG_IS_MSG_TAG(((ErtsSignal *) (Sig))->common.tag)
+
+typedef union {
+ ErtsSignalCommon common;
+ ErtsMessageRef msg;
+} ErtsSignal;
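
The comment above describes how a queued entry's tag discriminates the three cases; as a concrete illustration, a minimal, hypothetical classifier (not part of this diff) built on the predicates just defined:

/* Illustrative only: map a queued signal to a descriptive string
 * using the tag predicates defined above. */
static const char *
sig_kind(ErtsMessage *sig)
{
    Eterm tag = ((ErtsSignal *) sig)->common.tag;

    if (ERTS_SIG_IS_NON_MSG_TAG(tag))
        return "non-message signal";      /* external pid header tag */
    if (ERTS_SIG_IS_EXTERNAL_MSG_TAG(tag))
        return "undecoded dist message";  /* THE_NON_VALUE */
    ASSERT(ERTS_SIG_IS_INTERNAL_MSG_TAG(tag));
    return "ordinary message";            /* a valid term */
}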
+
+typedef struct {
+ /* pointers to next pointers pointing to... */
+ ErtsMessage **next; /* ... next (non-message) signal */
+ ErtsMessage **last; /* ... last (non-message) signal */
+} ErtsMsgQNMSigs;
+
/* Size of default message buffer (erl_message.c) */
#define ERL_MESSAGE_BUF_SZ 500
typedef struct {
- ErtsMessage* first;
- ErtsMessage** last; /* point to the last next pointer */
- ErtsMessage** save;
- Sint len; /* queue length */
-
/*
- * The following two fields are used by the recv_mark/1 and
- * recv_set/1 instructions.
+ * ** The signal queues private to a process. **
+ *
+ * These are:
+ * - an inner queue which only consists of
+ * message signals
+ * - a middle queue which contains a mixture
+ * of message and non-message signals
+ *
+ * When the process isn't processing signals in
+ * erts_proc_sig_handle_incoming():
+ * - the message queue corresponds to the inner
+ * queue. Messages in the middle queue (and
+ * in the outer queue) are in transit and
+ * have NOT been received yet!
+ *
+ * When the process is processing signals in
+ * erts_proc_sig_handle_incoming():
+ * - the message queue corresponds to the inner
+ * queue plus the head of the middle queue up
+ * to the signal currently being processed.
+ * Any messages further back in the middle queue
+ * (and in the outer queue) are still in transit
+ * and have NOT been received yet!
+ *
+ * In the general case the 'len' field of this
+ * structure does NOT correspond to the message
+ * queue length. When the process is inspected
+ * via process_info() it does, however, correspond
+ * to the message queue length, but this is a
+ * special case!
+ *
+ * When no process-info request is in transit to
+ * the process the 'len' field corresponds to
+ * the total amount of messages in inner and
+ * middle queues (which does NOT correspond to
+ * the message queue length). When process-info
+ * requests are in transit to the process, the
+ * usage of the 'len' field changes and is used
+ * as an offset which might even be negative.
*/
- BeamInstr* mark; /* address to rec_loop/2 instruction */
- ErtsMessage** saved_last; /* saved last pointer */
-} ErlMessageQueue;
-#ifdef ERTS_SMP
+ /* inner queue */
+ ErtsMessage *first;
+ ErtsMessage **last; /* point to the last next pointer */
+ ErtsMessage **save;
+
+ /* middle queue */
+ ErtsMessage *cont;
+ ErtsMessage **cont_last;
+ ErtsMsgQNMSigs nmsigs;
+
+ /* Common for inner and middle queue */
+ ErtsMessage **saved_last; /* saved last pointer */
+ Sint len; /* NOT message queue length (see above) */
+} ErtsSignalPrivQueues;
typedef struct {
ErtsMessage* first;
ErtsMessage** last; /* point to the last next pointer */
- Sint len; /* queue length */
-} ErlMessageInQueue;
+ Sint len; /* number of messages in queue */
+ ErtsMsgQNMSigs nmsigs;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ int may_contain_heap_terms;
+#endif
+} ErtsSignalInQueue;
typedef struct erl_trace_message_queue__ {
struct erl_trace_message_queue__ *next; /* point to the next receiver */
@@ -190,10 +311,45 @@ typedef struct erl_trace_message_queue__ {
Sint len; /* queue length */
} ErlTraceMessageQueue;
-#endif
+#define ERTS_RECV_MARK_SAVE(P) \
+ do { \
+ erts_proc_lock((P), ERTS_PROC_LOCK_MSGQ); \
+ erts_proc_sig_fetch((P)); \
+ erts_proc_unlock((P), ERTS_PROC_LOCK_MSGQ); \
+ if ((P)->sig_qs.cont) { \
+ (P)->sig_qs.saved_last = (P)->sig_qs.cont_last; \
+ (P)->flags |= F_DEFERRED_SAVED_LAST; \
+ } \
+ else { \
+ (P)->sig_qs.saved_last = (P)->sig_qs.last; \
+ (P)->flags &= ~F_DEFERRED_SAVED_LAST; \
+ } \
+ } while (0)
+
+#define ERTS_RECV_MARK_SET(P) \
+ do { \
+ if ((P)->sig_qs.saved_last) { \
+ if ((P)->flags & F_DEFERRED_SAVED_LAST) { \
+ /* Points to middle queue; use end of inner */ \
+ (P)->sig_qs.save = (P)->sig_qs.last; \
+ ASSERT(!PEEK_MESSAGE((P))); \
+ } \
+ else { \
+ /* Points to inner queue; safe to use */ \
+ (P)->sig_qs.save = (P)->sig_qs.saved_last; \
+ } \
+ } \
+ } while (0)
+
+#define ERTS_RECV_MARK_CLEAR(P) \
+ do { \
+ (P)->sig_qs.saved_last = NULL; \
+ (P)->flags &= ~F_DEFERRED_SAVED_LAST; \
+ } while (0)
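
These three macros implement the receive-marker optimization; to make the intended call sequence concrete, a hedged sketch (the BEAM instruction plumbing and error paths are elided, and the function name is invented):

/* Illustrative only: the expected macro sequence for
 *   Ref = make_ref(), ... receive {Ref, Msg} -> ... end
 */
static void
recv_mark_sketch(Process *c_p)
{
    ERTS_RECV_MARK_SAVE(c_p);   /* at make_ref(): remember queue end */

    /* ... messages may arrive here; none can contain Ref yet ... */

    ERTS_RECV_MARK_SET(c_p);    /* at the receive: skip messages that
                                 * were queued before the mark */

    /* loop_rec / remove_message run here; a successful match ends
     * with JOIN_MESSAGE(c_p), which also clears the mark */
}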
+
/* Get "current" message */
-#define PEEK_MESSAGE(p) (*(p)->msg.save)
+#define PEEK_MESSAGE(p) (*(p)->sig_qs.save)
#ifdef USE_VM_PROBES
#define LINK_MESSAGE_DTAG(mp, dt) ERL_MESSAGE_DT_UTAG(mp) = dt
@@ -201,67 +357,53 @@ typedef struct erl_trace_message_queue__ {
#define LINK_MESSAGE_DTAG(mp, dt)
#endif
-#define LINK_MESSAGE_IMPL(p, first_msg, last_msg, num_msgs, where) do { \
- *(p)->where.last = (first_msg); \
- (p)->where.last = (last_msg); \
- (p)->where.len += (num_msgs); \
- } while(0)
-
-#ifdef ERTS_SMP
-
-/* Add message last in private message queue */
-#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \
- do { \
- LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
- } while (0)
-
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, len) \
- LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq)
-
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) \
- do { \
- if (p->msg_inq.first) { \
- *p->msg.last = p->msg_inq.first; \
- p->msg.last = p->msg_inq.last; \
- p->msg.len += p->msg_inq.len; \
- p->msg_inq.first = NULL; \
- p->msg_inq.last = &p->msg_inq.first; \
- p->msg_inq.len = 0; \
- } \
- } while (0)
-
+#ifdef USE_VM_PROBES
+# define ERTS_MSG_RECV_TRACED(P) \
+ ((ERTS_TRACE_FLAGS((P)) & F_TRACE_RECEIVE) \
+ || DTRACE_ENABLED(message_queued))
#else
+# define ERTS_MSG_RECV_TRACED(P) \
+ (ERTS_TRACE_FLAGS((P)) & F_TRACE_RECEIVE)
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)
+#endif
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+/* Add one message last in message queue */
+#define LINK_MESSAGE(p, msg) \
do { \
- LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ ASSERT(ERTS_SIG_IS_MSG(msg)); \
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
+ *(p)->sig_inq.last = (msg); \
+ (p)->sig_inq.last = &(msg)->next; \
+ (p)->sig_inq.len++; \
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
} while(0)
-#endif
-
/* Unlink current message */
-#define UNLINK_MESSAGE(p,msgp) do { \
- ErtsMessage* __mp = (msgp)->next; \
- *(p)->msg.save = __mp; \
- (p)->msg.len--; \
- if (__mp == NULL) \
- (p)->msg.last = (p)->msg.save; \
- (p)->msg.mark = 0; \
-} while(0)
+#define UNLINK_MESSAGE(p,msgp) \
+ do { \
+ ErtsMessage *mp__ = (msgp)->next; \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), 0, "before"); \
+ *(p)->sig_qs.save = mp__; \
+ (p)->sig_qs.len--; \
+ if (mp__ == NULL) \
+ (p)->sig_qs.last = (p)->sig_qs.save; \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), 0, "after"); \
+ } while(0)
-/* Reset message save point (after receive match) */
-#define JOIN_MESSAGE(p) \
- (p)->msg.save = &(p)->msg.first
+/*
+ * Reset message save point (after receive match).
+ * Also invalidate the saved position since it may no
+ * longer be safe to use.
+ */
+#define JOIN_MESSAGE(p) \
+ do { \
+ (p)->sig_qs.save = &(p)->sig_qs.first; \
+ ERTS_RECV_MARK_CLEAR((p)); \
+ } while(0)
/* Save current message */
#define SAVE_MESSAGE(p) \
- (p)->msg.save = &(*(p)->msg.save)->next
-
-#define ERTS_SND_FLG_NO_SEQ_TRACE (((unsigned) 1) << 0)
+ (p)->sig_qs.save = &(*(p)->sig_qs.save)->next
#define ERTS_HEAP_FRAG_SIZE(DATA_WORDS) \
(sizeof(ErlHeapFragment) - sizeof(Eterm) + (DATA_WORDS)*sizeof(Eterm))
@@ -285,7 +427,8 @@ typedef struct erl_trace_message_queue__ {
do { \
(MP)->next = NULL; \
ERL_MESSAGE_TERM(MP) = THE_NON_VALUE; \
- ERL_MESSAGE_TOKEN(MP) = NIL; \
+ ERL_MESSAGE_TOKEN(MP) = THE_NON_VALUE; \
+ ERL_MESSAGE_FROM(MP) = NIL; \
ERL_MESSAGE_DT_UTAG_INIT(MP); \
MP->data.attached = NULL; \
} while (0)
@@ -296,11 +439,12 @@ ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
-Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
-Sint erts_queue_messages(Process*, ErtsProcLocks,
- ErtsMessage*, ErtsMessage**, Uint, Eterm);
+void erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
+void erts_queue_proc_message(Process* from,Process* to, ErtsProcLocks,ErtsMessage*, Eterm);
+void erts_queue_proc_messages(Process* from, Process* to, ErtsProcLocks,
+ ErtsMessage*, ErtsMessage**, Uint);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
-Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
+void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
Uint erts_msg_attached_data_size_aux(ErtsMessage *msg);
@@ -315,16 +459,6 @@ int erts_decode_dist_message(Process *, ErtsProcLocks, ErtsMessage *, int);
void erts_cleanup_messages(ErtsMessage *mp);
-typedef struct {
- Uint size;
- ErtsMessage *msgp;
-} ErtsMessageInfo;
-
-Uint erts_prep_msgq_for_inspection(Process *c_p,
- Process *rp,
- ErtsProcLocks rp_locks,
- ErtsMessageInfo *mip);
-
void *erts_alloc_message_ref(void);
void erts_free_message_ref(void *);
@@ -366,12 +500,6 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_shrink_message(ErtsMessage *mp, Uint sz,
ERTS_GLB_FORCE_INLINE void erts_free_message(ErtsMessage *mp);
ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment*);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg);
-ERTS_GLB_INLINE void erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
- ErtsMessage **newpp,
- ErtsMessage **oldpp);
-ERTS_GLB_INLINE void erts_msgq_replace_msg_ref(ErlMessageQueue *msgq,
- ErtsMessage *newp,
- ErtsMessage **oldpp);
#define ERTS_MSG_COMBINED_HFRAG ((void *) 0x1)
@@ -475,28 +603,6 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
}
}
-ERTS_GLB_INLINE void
-erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
- ErtsMessage **newpp,
- ErtsMessage **oldpp)
-{
- if (msgq->save == oldpp)
- msgq->save = newpp;
- if (msgq->last == oldpp)
- msgq->last = newpp;
- if (msgq->saved_last == oldpp)
- msgq->saved_last = newpp;
-}
-
-ERTS_GLB_INLINE void
-erts_msgq_replace_msg_ref(ErlMessageQueue *msgq, ErtsMessage *newp, ErtsMessage **oldpp)
-{
- ErtsMessage *oldp = *oldpp;
- newp->next = oldp->next;
- erts_msgq_update_internal_pointers(msgq, &newp->next, &oldp->next);
- *oldpp = newp;
-}
-
#endif
Uint erts_mbuf_size(Process *p);
@@ -510,4 +616,17 @@ Uint erts_mbuf_size(Process *p);
# define ERTS_CHK_MBUF_SZ(P) ((void) 1)
#endif
+#define ERTS_FOREACH_SIG_PRIVQS(PROC, MVAR, CODE) \
+ do { \
+ int i__; \
+ ErtsMessage *msgs__[] = {(PROC)->sig_qs.first, \
+ (PROC)->sig_qs.cont}; \
+ for (i__ = 0; i__ < sizeof(msgs__)/sizeof(msgs__[0]); i__++) { \
+ ErtsMessage *MVAR; \
+ for (MVAR = msgs__[i__]; MVAR; MVAR = MVAR->next) { \
+ CODE; \
+ } \
+ } \
+ } while (0)
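
A small usage sketch for the macro above; the function is hypothetical and only illustrates how the MVAR and CODE arguments are meant to be supplied:

/* Hypothetical: count entries currently in the private signal
 * queues (inner + middle) of a process. Note that this is NOT the
 * message queue length (see the comment in ErtsSignalPrivQueues). */
static Sint
count_privq_entries(Process *c_p)
{
    Sint n = 0;
    ERTS_FOREACH_SIG_PRIVQS(c_p, mp, n++);
    return n;
}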
+
#endif
diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c
new file mode 100644
index 0000000000..48d9bd4ca5
--- /dev/null
+++ b/erts/emulator/beam/erl_monitor_link.c
@@ -0,0 +1,1326 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Monitor and link implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include <stddef.h>
+#include "global.h"
+#include "erl_node_tables.h"
+#include "erl_monitor_link.h"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Red-black tree implementation used for monitors and links. *
+\* */
+
+static ERTS_INLINE Eterm
+ml_get_key(ErtsMonLnkNode *mln)
+{
+ char *ptr = (char *) mln;
+ ptr += mln->key_offset;
+
+#ifdef ERTS_ML_DEBUG
+ switch (mln->type) {
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ case ERTS_MON_TYPE_RESOURCE: {
+ ErtsMonitorData *mdp = erts_monitor_to_data(mln);
+ ERTS_ML_ASSERT(&mdp->ref == (Eterm *) ptr);
+ break;
+ }
+ case ERTS_LNK_TYPE_PROC:
+ case ERTS_LNK_TYPE_DIST_PROC:
+ case ERTS_LNK_TYPE_PORT:
+ case ERTS_MON_TYPE_NODE:
+ case ERTS_MON_TYPE_NODES:
+ case ERTS_MON_TYPE_SUSPEND:
+ ERTS_ML_ASSERT(&mln->other.item == (Eterm *) ptr);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid type");
+ break;
+ }
+#endif
+
+ return *((Eterm *) ptr);
+}
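
ml_get_key() relies on each node recording the byte offset from itself to its key, so one routine can serve several enclosing structs. A self-contained illustration of the same offsetof pattern, with invented names and outside ERTS:

#include <stddef.h>
#include <stdio.h>

typedef struct {
    size_t key_offset;   /* byte distance from this node to its key */
} node_t;

typedef struct {
    node_t node;
    int    other;
    long   key;          /* the key lives in the enclosing struct */
} monitor_like_t;

static long get_key(const node_t *n)
{
    return *(const long *) ((const char *) n + n->key_offset);
}

int main(void)
{
    monitor_like_t m = { { offsetof(monitor_like_t, key)
                           - offsetof(monitor_like_t, node) }, 0, 42 };
    printf("%ld\n", get_key(&m.node)); /* prints 42 */
    return 0;
}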
+
+/*
+ * Comparison function for valid *and only valid* keys. That
+ * is, if the set of valid keys is changed, this function needs
+ * to be updated...
+ *
+ * Note that this function does *not* order terms according to
+ * term order, and that the order may vary between emulator
+ * restarts...
+ */
+static int
+ml_cmp_keys(Eterm key1, Eterm key2)
+{
+ /*
+ * A monitor key is either an internal pid, a (nodename) atom,
+ * a small (monitor nodes bitmask), an ordinary internal ref,
+ * or an external ref.
+ *
+ * Order between monitors with keys of different types:
+ * internal pid < atom < small < internal ref < external ref
+ *
+ * A link key is either a pid or an internal port.
+ *
+ * Order between links with keys of different types:
+ * internal pid < internal port < external pid
+ *
+ */
+ ERTS_ML_ASSERT(is_internal_pid(key1)
+ || is_internal_port(key1)
+ || is_atom(key1)
+ || is_small(key1)
+ || is_external_pid(key1)
+ || is_internal_ordinary_ref(key1)
+ || is_external_ref(key1));
+
+ ERTS_ML_ASSERT(is_internal_pid(key2)
+ || is_internal_port(key2)
+ || is_atom(key2)
+ || is_small(key2)
+ || is_external_pid(key2)
+ || is_internal_ordinary_ref(key2)
+ || is_external_ref(key2));
+
+ if (is_immed(key1)) {
+ int key1_tag, key2_tag;
+
+ ERTS_ML_ASSERT(is_internal_pid(key1)
+ || is_internal_port(key1)
+ || is_atom(key1)
+ || is_small(key1));
+
+ if (key1 == key2)
+ return 0;
+
+ if (is_boxed(key2))
+ return -1;
+
+ ERTS_ML_ASSERT(is_internal_pid(key2)
+ || is_internal_port(key2)
+ || is_atom(key2)
+ || is_small(key2));
+
+ key1_tag = (int) (key1 & _TAG_IMMED1_MASK);
+ key2_tag = (int) (key2 & _TAG_IMMED1_MASK);
+
+ if (key1_tag != key2_tag)
+ return key1_tag - key2_tag;
+
+ ASSERT((is_atom(key1) && is_atom(key2))
+ || (is_small(key1) && is_small(key2))
+ || (is_internal_pid(key1) && is_internal_pid(key2))
+ || (is_internal_port(key1) && is_internal_port(key2)));
+
+ return key1 < key2 ? -1 : 1;
+ }
+ else {
+ Eterm *w1, hdr1;
+
+ ERTS_ML_ASSERT(is_boxed(key1));
+
+ w1 = boxed_val(key1);
+ hdr1 = *w1;
+
+ if ((hdr1 & _TAG_HEADER_MASK) == _TAG_HEADER_REF) {
+ Eterm *w2;
+
+ if (!is_internal_ref(key2))
+ return is_immed(key2) ? 1 : -1;
+
+ w2 = internal_ref_val(key2);
+
+ ERTS_ML_ASSERT(w1[0] == ERTS_REF_THING_HEADER);
+ ERTS_ML_ASSERT(w2[0] == ERTS_REF_THING_HEADER);
+
+ return sys_memcmp((void *) &w1[1], (void *) &w2[1],
+ (ERTS_REF_THING_SIZE - 1)*sizeof(Eterm));
+ }
+
+ ERTS_ML_ASSERT(is_external(key1));
+
+ if (is_not_external(key2))
+ return 1;
+ else {
+ Uint ndw1, ndw2;
+ ExternalThing *et1, *et2;
+ ErlNode *n1, *n2;
+
+ ERTS_ML_ASSERT((is_external_ref(key1) && is_external_ref(key2))
+ || (is_external_pid(key1) && is_external_pid(key2)));
+
+ et1 = (ExternalThing *) w1;
+ et2 = (ExternalThing *) external_val(key2);
+
+ n1 = et1->node;
+ n2 = et2->node;
+
+ if (n1 != n2) {
+ if (n1->sysname != n2->sysname)
+ return n1->sysname < n2->sysname ? -1 : 1;
+ ASSERT(n1->creation != n2->creation);
+ return n1->creation < n2->creation ? -1 : 1;
+ }
+
+ ndw1 = external_thing_data_words(et1);
+ ndw2 = external_thing_data_words(et2);
+ if (ndw1 != ndw2)
+ return ndw1 < ndw2 ? -1 : 1;
+
+ return sys_memcmp((void *) &et1->data.ui[0],
+ (void *) &et2->data.ui[0],
+ ndw1*sizeof(Eterm));
+ }
+ }
+}
+
+#define ERTS_ML_TPFLG_RED (((UWord) 1) << 0)
+
+#define ERTS_ML_TPFLGS_MASK (ERTS_ML_TPFLG_RED)
+
+#define ERTS_RBT_PREFIX mon_lnk
+#define ERTS_RBT_T ErtsMonLnkNode
+#define ERTS_RBT_KEY_T Eterm
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->node.tree.parent = (UWord) NULL; \
+ (T)->node.tree.right = NULL; \
+ (T)->node.tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ (!!((T)->node.tree.parent & ERTS_ML_TPFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->node.tree.parent |= ERTS_ML_TPFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->node.tree.parent &= ~ERTS_ML_TPFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->node.tree.parent & ERTS_ML_TPFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_ML_ASSERT((((UWord) (F)) & ~ERTS_ML_TPFLGS_MASK) == 0); \
+ (T)->node.tree.parent &= ~ERTS_ML_TPFLGS_MASK; \
+ (T)->node.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ERTS_RBT_T *) ((T)->node.tree.parent & ~ERTS_ML_TPFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_ML_ASSERT((((UWord) (P)) & ERTS_ML_TPFLGS_MASK) == 0); \
+ (T)->node.tree.parent &= ERTS_ML_TPFLGS_MASK; \
+ (T)->node.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->node.tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->node.tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->node.tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->node.tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) (ml_get_key((T)))
+#define ERTS_RBT_CMP_KEYS(KX, KY) (ml_cmp_keys((KX), (KY)))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_LOOKUP_CREATE
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_REPLACE
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_YIELDING
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Tree Operations *
+\* */
+
+static ErtsMonLnkNode *
+ml_rbt_lookup(ErtsMonLnkNode *root, Eterm ref)
+{
+ ErtsMonLnkNode *ml = mon_lnk_rbt_lookup(root, ref);
+ ASSERT(!ml || (ml->flags & ERTS_ML_FLG_IN_TABLE));
+ return ml;
+}
+
+static ErtsMonLnkNode *
+ml_rbt_lookup_insert(ErtsMonLnkNode **root, ErtsMonLnkNode *ml)
+{
+ ErtsMonLnkNode *res;
+ ERTS_ML_ASSERT(!(ml->flags & ERTS_ML_FLG_IN_TABLE));
+ res = mon_lnk_rbt_lookup_insert(root, ml);
+ if (!res)
+ ml->flags |= ERTS_ML_FLG_IN_TABLE;
+ return res;
+}
+
+static ErtsMonLnkNode *
+ml_rbt_lookup_create(ErtsMonLnkNode ** root, Eterm key,
+ ErtsMonLnkNode *(*create)(Eterm, void *),
+ void *carg, int *created)
+{
+ ErtsMonLnkNode *ml;
+ ml = mon_lnk_rbt_lookup_create(root, key, create, carg, created);
+ if (*created)
+ ml->flags |= ERTS_ML_FLG_IN_TABLE;
+ return ml;
+}
+
+static void
+ml_rbt_insert(ErtsMonLnkNode **root, ErtsMonLnkNode *ml)
+{
+#ifdef ERTS_ML_DEBUG
+ ErtsMonLnkNode *res;
+ ERTS_ML_ASSERT(!(ml->flags & ERTS_ML_FLG_IN_TABLE));
+ res = ml_rbt_lookup(*root, ml_get_key(ml));
+ ERTS_ML_ASSERT(res == NULL);
+#endif
+
+ mon_lnk_rbt_insert(root, ml);
+ ml->flags |= ERTS_ML_FLG_IN_TABLE;
+}
+
+static void
+ml_rbt_replace(ErtsMonLnkNode **root, ErtsMonLnkNode *old, ErtsMonLnkNode *new)
+{
+ ERTS_ML_ASSERT(old->flags & ERTS_ML_FLG_IN_TABLE);
+ ERTS_ML_ASSERT(!(new->flags & ERTS_ML_FLG_IN_TABLE));
+
+ mon_lnk_rbt_replace(root, old, new);
+ old->flags &= ~ERTS_ML_FLG_IN_TABLE;
+ new->flags |= ERTS_ML_FLG_IN_TABLE;
+}
+
+static void
+ml_rbt_delete(ErtsMonLnkNode **root, ErtsMonLnkNode *ml)
+{
+ ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
+ mon_lnk_rbt_delete(root, ml);
+ ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
+}
+
+
+static void
+ml_rbt_foreach(ErtsMonLnkNode *root,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg)
+{
+ mon_lnk_rbt_foreach(root, func, arg);
+}
+
+typedef struct {
+ ErtsMonLnkNode *root;
+ mon_lnk_rbt_yield_state_t rbt_ystate;
+} ErtsMonLnkYieldState;
+
+static int
+ml_rbt_foreach_yielding(ErtsMonLnkNode *root,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ int res;
+ ErtsMonLnkYieldState ys = {root, ERTS_RBT_YIELD_STAT_INITER};
+ ErtsMonLnkYieldState *ysp;
+
+ ysp = (ErtsMonLnkYieldState *) *vyspp;
+ if (!ysp)
+ ysp = &ys;
+ res = mon_lnk_rbt_foreach_yielding(ysp->root, func, arg,
+ &ysp->rbt_ystate, limit);
+ if (res == 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_ML_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_ML_YIELD_STATE,
+ sizeof(ErtsMonLnkYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(ErtsMonLnkYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+}
+
+typedef struct {
+ void (*func)(ErtsMonLnkNode *, void *);
+ void *arg;
+} ErtsMonLnkForeachDeleteContext;
+
+static void
+rbt_wrap_foreach_delete(ErtsMonLnkNode *ml, void *vctxt)
+{
+ ErtsMonLnkForeachDeleteContext *ctxt = vctxt;
+ ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
+ ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
+ ctxt->func(ml, ctxt->arg);
+}
+
+static void
+ml_rbt_foreach_delete(ErtsMonLnkNode **root,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg)
+{
+ ErtsMonLnkForeachDeleteContext ctxt;
+ ctxt.func = func;
+ ctxt.arg = arg;
+ mon_lnk_rbt_foreach_destroy(root,
+ rbt_wrap_foreach_delete,
+ (void *) &ctxt);
+}
+
+static int
+ml_rbt_foreach_delete_yielding(ErtsMonLnkNode **root,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ int res;
+ ErtsMonLnkYieldState ys = {*root, ERTS_RBT_YIELD_STAT_INITER};
+ ErtsMonLnkYieldState *ysp;
+ ErtsMonLnkForeachDeleteContext ctxt;
+ ctxt.func = func;
+ ctxt.arg = arg;
+
+ ysp = (ErtsMonLnkYieldState *) *vyspp;
+ if (!ysp) {
+ *root = NULL;
+ ysp = &ys;
+ }
+ res = mon_lnk_rbt_foreach_destroy_yielding(&ysp->root,
+ rbt_wrap_foreach_delete,
+ (void *) &ctxt,
+ &ysp->rbt_ystate,
+ limit);
+ if (res == 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_ML_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_ML_YIELD_STATE,
+ sizeof(ErtsMonLnkYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(ErtsMonLnkYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * List Operations *
+\* */
+
+static int
+ml_dl_list_foreach_yielding(ErtsMonLnkNode *list,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ Sint cnt = 0;
+ ErtsMonLnkNode *ml = (ErtsMonLnkNode *) *vyspp;
+
+ ERTS_ML_ASSERT(!ml || list);
+
+ if (!ml)
+ ml = list;
+
+ if (ml) {
+ do {
+ ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
+ func(ml, arg);
+ ml = ml->node.list.next;
+ cnt++;
+ } while (ml != list && cnt < limit);
+ if (ml != list) {
+ *vyspp = (void *) ml;
+ return 1; /* yield */
+ }
+ }
+
+ *vyspp = NULL;
+ return 0; /* done */
+}
+
+static int
+ml_dl_list_foreach_delete_yielding(ErtsMonLnkNode **list,
+ void (*func)(ErtsMonLnkNode *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ Sint cnt = 0;
+ ErtsMonLnkNode *first = *list;
+ ErtsMonLnkNode *ml = (ErtsMonLnkNode *) *vyspp;
+
+ ERTS_ML_ASSERT(!ml || first);
+
+ if (!ml)
+ ml = first;
+
+ if (ml) {
+ do {
+ ErtsMonLnkNode *next = ml->node.list.next;
+ ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
+ ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
+ func(ml, arg);
+ ml = next;
+ cnt++;
+ } while (ml != first && cnt < limit);
+ if (ml != first) {
+ *vyspp = (void *) ml;
+ return 1; /* yield */
+ }
+ }
+
+ *vyspp = NULL;
+ *list = NULL;
+ return 0; /* done */
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Misc *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+ErtsMonLnkDist *
+erts_mon_link_dist_create(Eterm nodename)
+{
+ ErtsMonLnkDist *mld = erts_alloc(ERTS_ALC_T_ML_DIST,
+ sizeof(ErtsMonLnkDist));
+ mld->nodename = nodename;
+ mld->connection_id = ~((Uint32) 0);
+ erts_atomic_init_nob(&mld->refc, 1);
+ erts_mtx_init(&mld->mtx, "dist_entry_links", nodename,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ mld->alive = !0;
+ mld->links = NULL;
+ mld->monitors = NULL;
+ mld->orig_name_monitors = NULL;
+ return mld;
+}
+
+void
+erts_mon_link_dist_destroy__(ErtsMonLnkDist *mld)
+{
+ ERTS_ML_ASSERT(erts_atomic_read_nob(&mld->refc) == 0);
+ ERTS_ML_ASSERT(!mld->alive);
+ ERTS_ML_ASSERT(!mld->links);
+ ERTS_ML_ASSERT(!mld->monitors);
+ ERTS_ML_ASSERT(!mld->orig_name_monitors);
+
+ erts_mtx_destroy(&mld->mtx);
+ erts_free(ERTS_ALC_T_ML_DIST, mld);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Monitor Operations *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifdef ERTS_ML_DEBUG
+size_t erts_monitor_origin_offset;
+size_t erts_monitor_origin_key_offset;
+size_t erts_monitor_target_offset;
+size_t erts_monitor_target_key_offset;
+size_t erts_monitor_node_key_offset;
+#endif
+
+static ERTS_INLINE void
+monitor_init(void)
+{
+#ifdef ERTS_ML_DEBUG
+ erts_monitor_origin_offset = offsetof(ErtsMonitorData, origin);
+ erts_monitor_origin_key_offset = offsetof(ErtsMonitorData, ref);
+ ASSERT(erts_monitor_origin_key_offset >= erts_monitor_origin_offset);
+ erts_monitor_origin_key_offset -= erts_monitor_origin_offset;
+ erts_monitor_target_offset = offsetof(ErtsMonitorData, target);
+ erts_monitor_target_key_offset = offsetof(ErtsMonitorData, ref);
+ ASSERT(erts_monitor_target_key_offset >= erts_monitor_target_offset);
+ erts_monitor_target_key_offset -= erts_monitor_target_offset;
+ erts_monitor_node_key_offset = offsetof(ErtsMonitor, other.item);
+#endif
+}
+
+ErtsMonitor *
+erts_monitor_tree_lookup(ErtsMonitor *root, Eterm key)
+{
+ ERTS_ML_ASSERT(is_internal_ordinary_ref(key)
+ || is_external_ref(key)
+ || is_atom(key)
+ || is_small(key)
+ || is_internal_pid(key));
+ return (ErtsMonitor *) ml_rbt_lookup((ErtsMonLnkNode *) root, key);
+}
+
+ErtsMonitor *
+erts_monitor_tree_lookup_insert(ErtsMonitor **root, ErtsMonitor *mon)
+{
+ return (ErtsMonitor *) ml_rbt_lookup_insert((ErtsMonLnkNode **) root,
+ (ErtsMonLnkNode *) mon);
+}
+
+typedef struct {
+ Uint16 type;
+ Eterm origin;
+} ErtsMonitorCreateCtxt;
+
+static ErtsMonLnkNode *
+create_monitor(Eterm target, void *vcctxt)
+{
+ ErtsMonitorCreateCtxt *cctxt = vcctxt;
+ ErtsMonitorData *mdp = erts_monitor_create(cctxt->type,
+ NIL,
+ cctxt->origin,
+ target,
+ NIL);
+ ERTS_ML_ASSERT(ml_cmp_keys(ml_get_key(&mdp->origin), target) == 0);
+ return (ErtsMonLnkNode *) &mdp->origin;
+}
+
+ErtsMonitor *
+erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created, Uint16 type,
+ Eterm origin, Eterm target)
+{
+ ErtsMonitor *res;
+ ErtsMonitorCreateCtxt cctxt = {type, origin};
+
+ ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE
+ || type == ERTS_MON_TYPE_NODES
+ || type == ERTS_MON_TYPE_SUSPEND);
+
+ res = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
+ target, create_monitor,
+ (void *) &cctxt,
+ created);
+
+ ERTS_ML_ASSERT(res && erts_monitor_is_origin(res));
+
+ return res;
+}
+
+void
+erts_monitor_tree_insert(ErtsMonitor **root, ErtsMonitor *mon)
+{
+ ml_rbt_insert((ErtsMonLnkNode **) root, (ErtsMonLnkNode *) mon);
+}
+
+void
+erts_monitor_tree_replace(ErtsMonitor **root, ErtsMonitor *old, ErtsMonitor *new)
+{
+ ml_rbt_replace((ErtsMonLnkNode **) root,
+ (ErtsMonLnkNode *) old,
+ (ErtsMonLnkNode *) new);
+}
+
+void
+erts_monitor_tree_delete(ErtsMonitor **root, ErtsMonitor *mon)
+{
+ ml_rbt_delete((ErtsMonLnkNode **) root, (ErtsMonLnkNode *) mon);
+}
+
+void
+erts_monitor_tree_foreach(ErtsMonitor *root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg)
+{
+ ml_rbt_foreach((ErtsMonLnkNode *) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg);
+}
+
+int
+erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_monitor_tree_foreach_delete(ErtsMonitor **root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg)
+{
+ ml_rbt_foreach_delete((ErtsMonLnkNode **) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg);
+}
+
+int
+erts_monitor_tree_foreach_delete_yielding(ErtsMonitor **root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_rbt_foreach_delete_yielding((ErtsMonLnkNode **) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_monitor_list_foreach(ErtsMonitor *list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg)
+{
+ void *ystate = NULL;
+ while (ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (void (*)(ErtsMonLnkNode *, void *)) func,
+ arg, &ystate, (Sint) INT_MAX));
+}
+
+int
+erts_monitor_list_foreach_yielding(ErtsMonitor *list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (void (*)(ErtsMonLnkNode *, void *)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_monitor_list_foreach_delete(ErtsMonitor **list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg)
+{
+ void *ystate = NULL;
+ while (ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, &ystate, (Sint) INT_MAX));
+}
+
+int
+erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+ErtsMonitorData *
+erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name)
+{
+ ErtsMonitorData *mdp;
+
+ switch (type) {
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
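+ /* Note: ERTS_MON_TYPE_TIME_OFFSET enters the block below through its
+ * case label, while PROC/PORT monitors with a name (non-NIL) skip
+ * the block and fall through to the extended case further down. */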
+ if (is_nil(name)) {
+ ErtsMonitorDataHeap *mdhp;
+ ErtsORefThing *ortp;
+
+ case ERTS_MON_TYPE_TIME_OFFSET:
+
+ ERTS_ML_ASSERT(is_nil(name));
+ ERTS_ML_ASSERT(is_immed(orgn) && is_immed(trgt));
+ ERTS_ML_ASSERT(is_internal_ordinary_ref(ref));
+
+ mdhp = erts_alloc(ERTS_ALC_T_MONITOR, sizeof(ErtsMonitorDataHeap));
+ mdp = &mdhp->md;
+ ERTS_ML_ASSERT(((void *) mdp) == ((void *) mdhp));
+
+ ortp = (ErtsORefThing *) (char *) internal_ref_val(ref);
+ mdhp->oref_thing = *ortp;
+ mdp->ref = make_internal_ref(&mdhp->oref_thing.header);
+
+ mdp->origin.other.item = trgt;
+ mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin);
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitorData, ref);
+ ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset);
+ mdp->origin.key_offset -= mdp->origin.offset;
+ mdp->origin.flags = (Uint16) 0;
+ mdp->origin.type = type;
+
+ mdp->target.other.item = orgn;
+ mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target);
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitorData, ref);
+ ERTS_ML_ASSERT(mdp->target.key_offset >= mdp->target.offset);
+ mdp->target.key_offset -= mdp->target.offset;
+ mdp->target.flags = ERTS_ML_FLG_TARGET;
+ mdp->target.type = type;
+ break;
+ }
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_RESOURCE:
+ case ERTS_MON_TYPE_NODE:
+ case ERTS_MON_TYPE_NODES: {
+ ErtsMonitorDataExtended *mdep;
+ Uint size = sizeof(ErtsMonitorDataExtended) - sizeof(Eterm);
+ Uint rsz, osz, tsz;
+ Eterm *hp;
+ ErlOffHeap oh;
+ Uint16 name_flag = is_nil(name) ? ((Uint16) 0) : ERTS_ML_FLG_NAME;
+
+ rsz = is_immed(ref) ? 0 : size_object(ref);
+ tsz = is_immed(trgt) ? 0 : size_object(trgt);
+ if (type == ERTS_MON_TYPE_RESOURCE)
+ osz = 0;
+ else
+ osz = is_immed(orgn) ? 0 : size_object(orgn);
+
+ size += (rsz + osz + tsz) * sizeof(Eterm);
+
+ mdep = erts_alloc(ERTS_ALC_T_MONITOR_EXT, size);
+
+ ERTS_INIT_OFF_HEAP(&oh);
+
+ hp = &mdep->heap[0];
+
+ mdp = &mdep->md;
+ ERTS_ML_ASSERT(((void *) mdp) == ((void *) mdep));
+
+ mdp->ref = rsz ? copy_struct(ref, rsz, &hp, &oh) : ref;
+
+ mdp->origin.other.item = tsz ? copy_struct(trgt, tsz, &hp, &oh) : trgt;
+ mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin);
+ mdp->origin.flags = ERTS_ML_FLG_EXTENDED|name_flag;
+ mdp->origin.type = type;
+
+ if (type == ERTS_MON_TYPE_RESOURCE)
+ mdp->target.other.ptr = (void *) orgn;
+ else
+ mdp->target.other.item = osz ? copy_struct(orgn, osz, &hp, &oh) : orgn;
+ mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target);
+ mdp->target.flags = ERTS_ML_FLG_TARGET|ERTS_ML_FLG_EXTENDED|name_flag;
+ mdp->target.type = type;
+
+ if (type == ERTS_MON_TYPE_NODE || type == ERTS_MON_TYPE_NODES) {
+ mdep->u.refc = 0;
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ ERTS_ML_ASSERT(!oh.first);
+ mdep->uptr.node_monitors = NULL;
+ }
+ else {
+ mdep->u.name = name;
+
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitorData, ref);
+ ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset);
+ mdp->origin.key_offset -= mdp->origin.offset;
+
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitorData, ref);
+ ERTS_ML_ASSERT(mdp->target.key_offset >= mdp->target.offset);
+ mdp->target.key_offset -= mdp->target.offset;
+
+ mdep->uptr.ohhp = oh.first;
+ }
+ mdep->dist = NULL;
+ break;
+ }
+ case ERTS_MON_TYPE_SUSPEND: {
+ ErtsMonitorSuspend *msp;
+
+ ERTS_ML_ASSERT(is_nil(name));
+ ERTS_ML_ASSERT(is_nil(ref));
+ ERTS_ML_ASSERT(is_internal_pid(orgn) && is_internal_pid(trgt));
+
+ msp = erts_alloc(ERTS_ALC_T_MONITOR_SUSPEND,
+ sizeof(ErtsMonitorSuspend));
+ mdp = &msp->md;
+ ERTS_ML_ASSERT(((void *) mdp) == ((void *) msp));
+
+ mdp->ref = NIL;
+
+ mdp->origin.other.item = trgt;
+ mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin);
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset);
+ mdp->origin.flags = (Uint16) ERTS_ML_FLG_EXTENDED;
+ mdp->origin.type = type;
+
+ mdp->target.other.item = orgn;
+ mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target);
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ mdp->target.flags = ERTS_ML_FLG_TARGET|ERTS_ML_FLG_EXTENDED;
+ mdp->target.type = type;
+
+ msp->next = NULL;
+ erts_atomic_init_relb(&msp->state, 0);
+
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid monitor type");
+ mdp = NULL;
+ break;
+ }
+
+ erts_atomic32_init_nob(&mdp->refc, 2);
+
+ return mdp;
+}
+
+/*
+ * erts_monitor_destroy__() should only be called from
+ * erts_monitor_release() or erts_monitor_release_both().
+ */
+void
+erts_monitor_destroy__(ErtsMonitorData *mdp)
+{
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) == 0);
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
+ == (mdp->target.flags & ERTS_ML_FLGS_SAME));
+
+ if (!(mdp->origin.flags & ERTS_ML_FLG_EXTENDED))
+ erts_free(ERTS_ALC_T_MONITOR, mdp);
+ else if (mdp->origin.type == ERTS_MON_TYPE_SUSPEND)
+ erts_free(ERTS_ALC_T_MONITOR_SUSPEND, mdp);
+ else {
+ ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
+ ErlOffHeap oh;
+ if (mdp->origin.type == ERTS_MON_TYPE_NODE)
+ ERTS_ML_ASSERT(!mdep->uptr.node_monitors);
+ else if (mdep->uptr.ohhp) {
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = mdep->uptr.ohhp;
+ erts_cleanup_offheap(&oh);
+ }
+ if (mdep->dist)
+ erts_mon_link_dist_dec_refc(mdep->dist);
+ erts_free(ERTS_ALC_T_MONITOR_EXT, mdp);
+ }
+}
+
+void
+erts_monitor_set_dead_dist(ErtsMonitor *mon, Eterm nodename)
+{
+ ErtsMonitorDataExtended *mdep;
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+
+ ERTS_ML_ASSERT(mon->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(mon->type == ERTS_MON_TYPE_DIST_PROC);
+ ERTS_ML_ASSERT(!mdep->dist);
+
+ mdep->dist = erts_mon_link_dist_create(nodename);
+ mdep->dist->alive = 0;
+}
+
+Uint
+erts_monitor_size(ErtsMonitor *mon)
+{
+ Uint size, refc;
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+
+ if (!(mon->flags & ERTS_ML_FLG_EXTENDED))
+ size = sizeof(ErtsMonitorDataHeap);
+ else if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ size = sizeof(ErtsMonitorSuspend);
+ else {
+ ErtsMonitorDataExtended *mdep;
+ Uint hsz = 0;
+
+ mdep = (ErtsMonitorDataExtended *) mdp;
+
+ if (mon->type != ERTS_MON_TYPE_NODE
+ && mon->type != ERTS_MON_TYPE_NODES) {
+ if (!is_immed(mdep->md.ref))
+ hsz += NC_HEAP_SIZE(mdep->md.ref);
+ if (mon->type == ERTS_MON_TYPE_DIST_PROC) {
+ if (!is_immed(mdep->md.origin.other.item))
+ hsz += NC_HEAP_SIZE(mdep->md.origin.other.item);
+ if (!is_immed(mdep->md.target.other.item))
+ hsz += NC_HEAP_SIZE(mdep->md.target.other.item);
+ }
+ }
+ size = sizeof(ErtsMonitorDataExtended) + (hsz - 1)*sizeof(Eterm);
+ }
+
+ refc = (Uint) erts_atomic32_read_nob(&mdp->refc);
+ ASSERT(refc > 0);
+
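+ /* The allocation is shared between the origin and target halves;
+ * report an equal share per live reference so that the halves
+ * together account for the whole block. */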
+ return size / refc;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Link Operations *
+ * *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifdef ERTS_ML_DEBUG
+size_t erts_link_a_offset;
+size_t erts_link_b_offset;
+size_t erts_link_key_offset;
+#endif
+
+static ERTS_INLINE void
+link_init(void)
+{
+#ifdef ERTS_ML_DEBUG
+ erts_link_a_offset = offsetof(ErtsLinkData, a);
+ erts_link_b_offset = offsetof(ErtsLinkData, b);
+ erts_link_key_offset = offsetof(ErtsLink, other.item);
+#endif
+}
+
+ErtsLink *
+erts_link_tree_lookup(ErtsLink *root, Eterm key)
+{
+ ASSERT(is_pid(key) || is_internal_port(key));
+ return (ErtsLink *) ml_rbt_lookup((ErtsMonLnkNode *) root, key);
+}
+
+ErtsLink *
+erts_link_tree_lookup_insert(ErtsLink **root, ErtsLink *lnk)
+{
+ return (ErtsLink *) ml_rbt_lookup_insert((ErtsMonLnkNode **) root,
+ (ErtsMonLnkNode *) lnk);
+}
+
+typedef struct {
+ Uint16 type;
+ Eterm a;
+} ErtsLinkCreateCtxt;
+
+static ErtsMonLnkNode *
+create_link(Eterm b, void *vcctxt)
+{
+ ErtsLinkCreateCtxt *cctxt = vcctxt;
+ ErtsLinkData *ldp = erts_link_create(cctxt->type, cctxt->a, b);
+ ERTS_ML_ASSERT(ml_cmp_keys(ldp->a.other.item, b) == 0);
+ return (ErtsMonLnkNode *) &ldp->a;
+}
+
+ErtsLink *erts_link_tree_lookup_create(ErtsLink **root, int *created,
+ Uint16 type, Eterm this,
+ Eterm other)
+{
+ ErtsLinkCreateCtxt cctxt;
+ cctxt.type = type;
+ cctxt.a = this;
+ return (ErtsLink *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
+ other, create_link,
+ (void *) &cctxt,
+ created);
+}
+
+void
+erts_link_tree_insert(ErtsLink **root, ErtsLink *lnk)
+{
+ ml_rbt_insert((ErtsMonLnkNode **) root, (ErtsMonLnkNode *) lnk);
+}
+
+void
+erts_link_tree_replace(ErtsLink **root, ErtsLink *old, ErtsLink *new)
+{
+ ml_rbt_replace((ErtsMonLnkNode **) root,
+ (ErtsMonLnkNode *) old,
+ (ErtsMonLnkNode *) new);
+}
+
+void
+erts_link_tree_delete(ErtsLink **root, ErtsLink *lnk)
+{
+ ml_rbt_delete((ErtsMonLnkNode **) root, (ErtsMonLnkNode *) lnk);
+}
+
+void
+erts_link_tree_foreach(ErtsLink *root,
+ void (*func)(ErtsLink *, void *),
+ void *arg)
+{
+ ml_rbt_foreach((ErtsMonLnkNode *) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg);
+
+}
+
+int
+erts_link_tree_foreach_yielding(ErtsLink *root,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_rbt_foreach_yielding((ErtsMonLnkNode *) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_link_tree_foreach_delete(ErtsLink **root,
+ void (*func)(ErtsLink *, void *),
+ void *arg)
+{
+ ml_rbt_foreach_delete((ErtsMonLnkNode **) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg);
+}
+
+int
+erts_link_tree_foreach_delete_yielding(ErtsLink **root,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_rbt_foreach_delete_yielding((ErtsMonLnkNode **) root,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_link_list_foreach(ErtsLink *list,
+ void (*func)(ErtsLink *, void *),
+ void *arg)
+{
+ void *ystate = NULL;
+ while (ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (void (*)(ErtsMonLnkNode *, void *)) func,
+ arg, &ystate, (Sint) INT_MAX));
+}
+
+int
+erts_link_list_foreach_yielding(ErtsLink *list,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_dl_list_foreach_yielding((ErtsMonLnkNode *) list,
+ (void (*)(ErtsMonLnkNode *, void *)) func,
+ arg, vyspp, limit);
+}
+
+void
+erts_link_list_foreach_delete(ErtsLink **list,
+ void (*func)(ErtsLink *, void *),
+ void *arg)
+{
+ void *ystate = NULL;
+ while (ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, &ystate, (Sint) INT_MAX));
+}
+
+int
+erts_link_list_foreach_delete_yielding(ErtsLink **list,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit)
+{
+ return ml_dl_list_foreach_delete_yielding((ErtsMonLnkNode **) list,
+ (void (*)(ErtsMonLnkNode*, void*)) func,
+ arg, vyspp, limit);
+}
+
+ErtsLinkData *
+erts_link_create(Uint16 type, Eterm a, Eterm b)
+{
+ ErtsLinkData *ldp;
+
+#ifdef ERTS_ML_DEBUG
+ switch (type) {
+ case ERTS_LNK_TYPE_PROC:
+ ERTS_ML_ASSERT(is_internal_pid(a) && is_internal_pid(b));
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ ERTS_ML_ASSERT(is_internal_pid(a) || is_internal_pid(b));
+ ERTS_ML_ASSERT(is_internal_port(a) || is_internal_port(b));
+ break;
+ case ERTS_LNK_TYPE_DIST_PROC:
+ ERTS_ML_ASSERT(is_internal_pid(a) || is_internal_pid(b));
+ ERTS_ML_ASSERT(is_external_pid(a) || is_external_pid(b));
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid link type");
+ break;
+ }
+#endif
+
+ if (type != ERTS_LNK_TYPE_DIST_PROC) {
+ ldp = erts_alloc(ERTS_ALC_T_LINK, sizeof(ErtsLinkData));
+
+ ldp->a.other.item = b;
+ ldp->a.flags = (Uint16) 0;
+
+ ldp->b.other.item = a;
+ ldp->b.flags = (Uint16) 0;
+ }
+ else {
+ ErtsLinkDataExtended *ldep;
+ Uint size, hsz;
+ Eterm *hp;
+ ErlOffHeap oh;
+
+ if (is_internal_pid(a))
+ hsz = NC_HEAP_SIZE(b);
+ else
+ hsz = NC_HEAP_SIZE(a);
+ ERTS_ML_ASSERT(hsz > 0);
+
+ size = sizeof(ErtsLinkDataExtended) - sizeof(Eterm);
+ size += hsz*sizeof(Eterm);
+
+ ldp = erts_alloc(ERTS_ALC_T_LINK_EXT, size);
+
+ ldp->a.flags = ERTS_ML_FLG_EXTENDED;
+ ldp->b.flags = ERTS_ML_FLG_EXTENDED;
+
+ ldep = (ErtsLinkDataExtended *) ldp;
+ hp = &ldep->heap[0];
+
+ ERTS_INIT_OFF_HEAP(&oh);
+
+ if (is_internal_pid(a)) {
+ ldp->a.other.item = STORE_NC(&hp, &oh, b);
+ ldp->b.other.item = a;
+ }
+ else {
+ ldp->a.other.item = b;
+ ldp->b.other.item = STORE_NC(&hp, &oh, a);
+ }
+
+ ldep->ohhp = oh.first;
+ ldep->dist = NULL;
+ }
+
+ erts_atomic32_init_nob(&ldp->refc, 2);
+
+ ldp->a.key_offset = (Uint16) offsetof(ErtsLink, other.item);
+ ldp->a.offset = (Uint16) offsetof(ErtsLinkData, a);
+ ldp->a.type = type;
+
+ ldp->b.key_offset = (Uint16) offsetof(ErtsLink, other.item);
+ ldp->b.offset = (Uint16) offsetof(ErtsLinkData, b);
+ ldp->b.type = type;
+
+ return ldp;
+}
+
+/*
+ * erts_link_destroy__() should only be called from
+ * erts_link_release() or erts_link_release_both().
+ */
+void
+erts_link_destroy__(ErtsLinkData *ldp)
+{
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&ldp->refc) == 0);
+ ERTS_ML_ASSERT(!(ldp->a.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(ldp->b.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT((ldp->a.flags & ERTS_ML_FLGS_SAME)
+ == (ldp->b.flags & ERTS_ML_FLGS_SAME));
+
+ if (!(ldp->a.flags & ERTS_ML_FLG_EXTENDED))
+ erts_free(ERTS_ALC_T_LINK, ldp);
+ else {
+ ErtsLinkDataExtended *ldep = (ErtsLinkDataExtended *) ldp;
+ ErlOffHeap oh;
+ if (ldep->ohhp) {
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = ldep->ohhp;
+ erts_cleanup_offheap(&oh);
+ }
+ if (ldep->dist)
+ erts_mon_link_dist_dec_refc(ldep->dist);
+ erts_free(ERTS_ALC_T_LINK_EXT, ldep);
+ }
+}
+
+void
+erts_link_set_dead_dist(ErtsLink *lnk, Eterm nodename)
+{
+ ErtsLinkDataExtended *ldep;
+ ldep = (ErtsLinkDataExtended *) erts_link_to_data(lnk);
+
+ ERTS_ML_ASSERT(lnk->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+ ERTS_ML_ASSERT(!ldep->dist);
+
+ ldep->dist = erts_mon_link_dist_create(nodename);
+ ldep->dist->alive = 0;
+}
+
+Uint
+erts_link_size(ErtsLink *lnk)
+{
+ Uint size, refc;
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+
+ if (!(lnk->flags & ERTS_ML_FLG_EXTENDED))
+ size = sizeof(ErtsLinkData);
+ else {
+ ErtsLinkDataExtended *ldep = (ErtsLinkDataExtended *) ldp;
+
+ ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+ ASSERT(is_external_pid_header(ldep->heap[0]));
+
+ size = sizeof(ErtsLinkDataExtended);
+ size += thing_arityval(ldep->heap[0])*sizeof(Eterm);
+ }
+
+ refc = (Uint) erts_atomic32_read_nob(&ldp->refc);
+ ASSERT(refc > 0);
+
+ return size / refc;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Misc *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+void
+erts_monitor_link_init(void)
+{
+ monitor_init();
+ link_init();
+}
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
new file mode 100644
index 0000000000..ed7bf7d54a
--- /dev/null
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -0,0 +1,2354 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Monitor and link implementation.
+ *
+ * === Monitors ==================================================
+ *
+ * The monitor data structure contains:
+ * - an 'origin' part that should be inserted in a data structure
+ * of the origin entity, i.e., the entity monitoring another
+ * entity
+ * - a 'target' part that should be inserted in a data structure
+ * of the target entity, i.e., the entity being monitored by
+ * another entity
+ * - a shared part that contains information shared between both
+ * origin and target entities
+ *
+ * That is, the two halves of the monitor as well as shared data
+ * are allocated in one single continuous memory block. The
+ * origin and target parts can separately each be inserted in
+ * either a (red-black) tree, a (circular double linked) list, or
+ * in a process signal queue.
+ *
+ * Each process and port contains:
+ * - a monitor list for local target monitors that is accessed
+ * via the ERTS_P_LT_MONITORS() macro, and
+ * - a monitor tree for other monitors that is accessed via the
+ * ERTS_P_MONITORS() macro
+ *
+ * These fields of processes/ports are protected by the main lock
+ * of the process/port. These are only intended to be accessed by
+ * the process/port itself. When setting up or tearing down a
+ * monitor one should *only* operate on the monitor tree/list of
+ * the currently executing process/port and send signals to the
+ * other involved process/port so it can modify its own monitor
+ * tree/list by itself (see erl_proc_sig_queue.h). One should
+ * absolutely *not* acquire the lock of the other involved
+ * process/port and operate on its monitor tree/list directly.
+ *
+ * Each dist entry contains a monitor/link dist structure that
+ * contains:
+ * - a monitor tree for origin named monitors that is accessed via
+ * the field 'orig_name_monitors', and
+ * - a monitor list for other monitors that is accessed via the
+ * 'monitors' field.
+ * Monitors in these fields contain information about all monitors
+ * over this specific connection.
+ *
+ * The fields of the dist structure are protected by a mutex in
+ * the same dist structure. Operations on these fields are
+ * normally performed by the locally involved process only,
+ * except when a connection is taken down. However in the case
+ * of distributed named monitors that originates from another
+ * node this is not possible. That is this operation is also
+ * performed from another context that the locally involved
+ * process.
+ *
+ * Access to monitor trees is performed using the
+ * erts_monitor_tree_* functions below. Access to monitor lists
+ * is performed using the erts_monitor_list_* functions below.
+ *
+ *
+ * The different monitor types:
+ *
+ * --- ERTS_MON_TYPE_PROC ----------------------------------------
+ *
+ * A local process (origin) monitors another local process
+ * (target).
+ *
+ * Origin:
+ * Other Item: Target process identifier
+ * Target:
+ * Other Item: Origin process identifier
+ * Shared:
+ * Key: Reference
+ * Name: Name (atom) if by name
+ *
+ * Valid keys are only ordinary internal references.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list for local targets on the target process.
+ *
+ * --- ERTS_MON_TYPE_PORT ----------------------------------------
+ *
+ * A local process (origin) monitors a local port (target), or a
+ * local port (origin) monitors a local process (target).
+ *
+ * Origin:
+ * Other Item: Target process/port identifier
+ * Target:
+ * Other Item: Origin process/port identifier
+ * Shared:
+ * Key: Reference
+ * Name: Name (atom) if by name
+ *
+ * Valid keys are only ordinary internal references.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process/port and target part of the monitor is stored
+ * in monitor list for local targets on the target process/port.
+ *
+ *
+ * --- ERTS_MON_TYPE_TIME_OFFSET ---------------------------------
+ *
+ * A local process (origin) monitors time offset (target)
+ *
+ * Origin:
+ * Other Item: clock_service
+ * Target:
+ * Other Item: Origin process identifier
+ * Shared:
+ * Key: Reference
+ *
+ * Valid keys are only ordinary internal references.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list referred by the variable 'time_offset_monitors'
+ * (see erl_time_sup.c).
+ *
+ *
+ * --- ERTS_MON_TYPE_DIST_PROC -----------------------------------
+ *
+ * A local process (origin) monitors a remote process (target).
+ * Origin node on local process and target node on dist entry.
+ *
+ * Origin:
+ * Other Item: Remote process identifier/Node name
+ * if by name
+ * Target:
+ * Other Item: Local process identifier
+ * Shared:
+ * Key: Reference
+ * Name: Name (atom) if by name
+ * Dist: Pointer to dist structure
+ *
+ * Valid keys are only ordinary internal references.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list referred by 'monitors' field of the dist
+ * structure.
+ *
+ *
+ * A remote process (origin) monitors a local process (target).
+ * Origin node on dist entry and target node on local process.
+ *
+ * Origin:
+ * Other Item: Local process identifier
+ * Target:
+ * Other Item: Remote process identifier
+ * Shared:
+ * Key: Reference
+ * Name: Name (atom) if by name
+ *
+ * Valid keys are only external references.
+ *
+ * If monitor by name, the origin part of the monitor is stored
+ * in the monitor tree referred by 'orig_name_monitors' field in
+ * dist structure; otherwise in the monitor list referred by
+ * 'monitors' field in dist structure. The target part of the
+ * monitor is stored in the monitor tree of the local target
+ * process.
+ *
+ *
+ * --- ERTS_MON_TYPE_RESOURCE ------------------------------------
+ *
+ * A NIF resource (origin) monitors a process (target).
+ *
+ * Origin:
+ * Other Item: Target process identifier
+ * Target:
+ * Other Ptr: Pointer to resource
+ * Shared:
+ * Key: Reference
+ *
+ * Valid keys are only ordinary internal references.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin resource (see erl_nif.c) and target part of the
+ * monitor is stored in monitor list for local targets on the
+ * target process.
+ *
+ * --- ERTS_MON_TYPE_NODE ----------------------------------------
+ *
+ * A local process (origin) monitors a distribution connection
+ * (target) via erlang:monitor_node().
+ *
+ * Origin:
+ * Other Item: Node name (atom)
+ * Key: Node name
+ * Target:
+ * Other Item: Origin process identifier
+ * Key: Origin process identifier
+ * Shared:
+ * Refc: Number of invocations
+ *
+ * Valid keys are only node-name atoms and internal process
+ * identifiers.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list referred by 'monitors' field of the dist
+ * structure.
+ *
+ * --- ERTS_MON_TYPE_NODES ---------------------------------------
+ *
+ * A local process (origin) monitors all connections (target),
+ * via net_kernel:monitor_nodes().
+ *
+ * Origin:
+ * Other Item: Bit mask (small)
+ * Key: Bit mask
+ * Target:
+ * Other Item: Origin process identifier
+ * Key: Origin process identifier
+ * Shared:
+ * Refc: Number of invocations
+ *
+ * Valid keys are only small integers and internal process
+ * identifiers.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list referred by the variable 'nodes_monitors' (see
+ * dist.c).
+ *
+ * --- ERTS_MON_TYPE_SUSPEND -------------------------------------
+ *
+ * Suspend monitor. A local process (origin) suspends another
+ * local process (target).
+ *
+ * Origin:
+ * Other Item: Process identifier of suspendee
+ * (target)
+ * Key: Process identifier of suspendee
+ * (target)
+ * Target:
+ * Other Item: Process identifier of suspender
+ * (origin)
+ * Key: Process identifier of suspender
+ * (origin)
+ * Shared:
+ * Next: Pointer to another suspend monitor
+ * State: Number of suspends and a flag
+ * indicating if the suspend is
+ * active or not.
+ *
+ * Origin part of the monitor is stored in the monitor tree of
+ * origin process and target part of the monitor is stored in
+ * monitor list for local targets on the target process.
+ *
+ *
+ *
+ * === Links =====================================================
+ *
+ * The link data structure contains:
+ * - an 'a' part that should be inserted in a data structure of
+ * one entity and contains the identifier of the other involved
+ * entity (b)
+ * - a 'b' part that should be inserted in a data structure of
+ * the other involved entity and contains the identifier of the
+ * other involved entity (a)
+ * - shared part that contains information shared between both
+ * involved entities
+ *
+ * That is, the two halves of the link as well as shared data
+ * are allocated in one single continuous memory block. The 'a'
+ * and the 'b' parts can separately each be inserted in either
+ * a (red-black) tree, a (circular double linked) list, or in a
+ * process signal queue.
+ *
+ * Each process and port contains:
+ * - a link tree for links that is accessed via the
+ * ERTS_P_LINKS() macro
+ *
+ * This field of processes/ports is protected by the main lock of
+ * the process/port. It is only intended to be accessed by the
+ * process/port itself. When setting up or tearing down a link
+ * one should *only* operate on the link tree of the currently
+ * executing process/port and send signals to the other involved
+ * process/port so it can modify its own link tree by itself
+ * (see erl_proc_sig_queue.h). One should absolutely *not*
+ * acquire the lock of the other involved process/port and
+ * operate on its link tree directly.
+ *
+ * Each dist entry contains a monitor/link dist structure that
+ * contains:
+ * - a link list for links via the 'links' field.
+ * Links in this field contain information about all links over
+ * this specific connection.
+ *
+ * The fields of the dist structure are protected by a mutex in
+ * the same dist structure. Operations on the 'links' field are
+ * normally performed by the locally involved process only,
+ * except when a connection is taken down.
+ *
+ * Access to link trees is performed using the erts_link_tree_*
+ * functions below. Access to link lists is performed using the
+ * erts_link_list_* functions below.
+ *
+ * There can only be one link between the same pair of
+ * processes/ports. Since a link can be simultaneously initiated
+ * from both ends, we always keep the link data structure with the
+ * lowest address if multiple links appear between the
+ * same pair of processes/ports.
+ *
+ *
+ * The different link types:
+ *
+ * --- ERTS_LNK_TYPE_PROC -----------------------------------------
+ *
+ * A link between a local process A and a local process B.
+ *
+ * A:
+ * Other Item: B process identifier
+ * Key: B process identifier
+ * B:
+ * Other Item: A process identifier
+ * Key: A process identifier
+ *
+ * Valid keys are only internal process identifiers.
+ *
+ * The 'A' part of the link is stored in the link tree of process A
+ * and the 'B' part of the link is stored in the link tree of process B.
+ *
+ * --- ERTS_LNK_TYPE_PORT -----------------------------------------
+ *
+ * A link between a local process/port A and a local process/port
+ * B.
+ *
+ * A:
+ * Other Item: B process/port identifier
+ * Key: B process/port identifier
+ * B:
+ * Other Item: A process/port identifier
+ * Key: A process/port identifier
+ *
+ * Valid keys are internal process identifiers and internal port
+ * identifiers.
+ *
+ * The 'A' part of the link is stored in the link tree of
+ * process/port A and the 'B' part of the link is stored in the
+ * link tree of process/port B.
+ *
+ * --- ERTS_LNK_TYPE_DIST_PROC ------------------------------------
+ *
+ * A link between a local process and a remote process. Either of
+ * the processes can be used as A or B.
+ *
+ * A:
+ * Other Item: B process identifier
+ * Key: B process identifier
+ * B:
+ * Other Item: A process identifier
+ * Key: A process identifier
+ * Shared:
+ * Dist: Pointer to dist structure
+ *
+ * Valid keys are internal and external process identifiers.
+ *
+ * The part of the link with a remote pid as "other item" is
+ * stored in the link tree of the local process. The part of
+ * the link with a local pid as "other item" is stored in the
+ * links list of the dist structure.
+ *
+ * ===============================================================
+ *
+ * Author: Rickard Green
+ *
+ */
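+
+/*
+ * A minimal usage sketch for a local process monitor (assuming 'c_p'
+ * is the executing origin process, 'ref' a newly created internal
+ * ordinary reference, and 'target_pid' the process to monitor; in the
+ * real flow the target half is handed over to the target process via
+ * a signal rather than inserted directly):
+ *
+ *     ErtsMonitorData *mdp = erts_monitor_create(ERTS_MON_TYPE_PROC,
+ *                                                ref, c_p->common.id,
+ *                                                target_pid, NIL);
+ *     // The origin half goes into the origin process' monitor tree...
+ *     erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), &mdp->origin);
+ *     // ...and the target half ends up in the target process' list of
+ *     // local target monitors (ERTS_P_LT_MONITORS()).
+ */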
+
+#ifndef ERL_MONITOR_LINK_H__
+#define ERL_MONITOR_LINK_H__
+
+#define ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+#include "erl_proc_sig_queue.h"
+#undef ERTS_PROC_SIG_QUEUE_TYPE_ONLY
+
+#if defined(DEBUG) || 0
+# define ERTS_ML_DEBUG
+#else
+# undef ERTS_ML_DEBUG
+#endif
+
+#ifdef ERTS_ML_DEBUG
+# define ERTS_ML_ASSERT ERTS_ASSERT
+#else
+# define ERTS_ML_ASSERT(E) ((void) 1)
+#endif
+
+#define ERTS_MON_TYPE_MAX ((Uint16) 7)
+
+#define ERTS_MON_TYPE_PROC ((Uint16) 0)
+#define ERTS_MON_TYPE_PORT ((Uint16) 1)
+#define ERTS_MON_TYPE_TIME_OFFSET ((Uint16) 2)
+#define ERTS_MON_TYPE_DIST_PROC ((Uint16) 3)
+#define ERTS_MON_TYPE_RESOURCE ((Uint16) 4)
+#define ERTS_MON_TYPE_NODE ((Uint16) 5)
+#define ERTS_MON_TYPE_NODES ((Uint16) 6)
+#define ERTS_MON_TYPE_SUSPEND ERTS_MON_TYPE_MAX
+
+#define ERTS_MON_LNK_TYPE_MAX (ERTS_MON_TYPE_MAX + ((Uint16) 3))
+#define ERTS_LNK_TYPE_MAX ERTS_MON_LNK_TYPE_MAX
+
+#define ERTS_LNK_TYPE_PROC (ERTS_MON_TYPE_MAX + ((Uint16) 1))
+#define ERTS_LNK_TYPE_PORT (ERTS_MON_TYPE_MAX + ((Uint16) 2))
+#define ERTS_LNK_TYPE_DIST_PROC ERTS_LNK_TYPE_MAX
+
+#define ERTS_ML_FLG_TARGET (((Uint16) 1) << 0)
+#define ERTS_ML_FLG_IN_TABLE (((Uint16) 1) << 1)
+#define ERTS_ML_FLG_IN_SUBTABLE (((Uint16) 1) << 2)
+#define ERTS_ML_FLG_NAME (((Uint16) 1) << 3)
+#define ERTS_ML_FLG_EXTENDED (((Uint16) 1) << 4)
+
+#define ERTS_ML_FLG_DBG_VISITED (((Uint16) 1) << 15)
+
+/* Flags that should be the same on both monitor/link halves */
+#define ERTS_ML_FLGS_SAME \
+ (ERTS_ML_FLG_EXTENDED|ERTS_ML_FLG_NAME)
+
+typedef struct ErtsMonLnkNode__ ErtsMonLnkNode;
+
+typedef struct {
+ UWord parent; /* Parent ptr and flags... */
+ ErtsMonLnkNode *right;
+ ErtsMonLnkNode *left;
+} ErtsMonLnkTreeNode;
+
+typedef struct {
+ ErtsMonLnkNode *next;
+ ErtsMonLnkNode *prev;
+} ErtsMonLnkListNode;
+
+struct ErtsMonLnkNode__ {
+ union {
+ ErtsSignalCommon signal;
+ ErtsMonLnkTreeNode tree;
+ ErtsMonLnkListNode list;
+ } node;
+ union {
+ Eterm item;
+ void *ptr;
+ } other;
+ Uint16 offset; /* offset from monitor/link data to this structure (node) */
+ Uint16 key_offset; /* offset from this structure (node) to key */
+ Uint16 flags;
+ Uint16 type;
+};
+
+typedef struct {
+ Eterm nodename;
+ Uint32 connection_id;
+ erts_atomic_t refc;
+ erts_mtx_t mtx;
+ int alive;
+ ErtsMonLnkNode *links; /* Link double linked circular list */
+ ErtsMonLnkNode *monitors; /* Monitor double linked circular list */
+ ErtsMonLnkNode *orig_name_monitors; /* Origin named monitors
+ red-black tree */
+} ErtsMonLnkDist;
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Misc *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/**
+ *
+ * @brief Initialize monitor/link implementation
+ *
+ */
+void erts_monitor_link_init(void);
+
+/**
+ *
+ * @brief Create monitor/link dist structure to attach to dist entry
+ *
+ * Create dist structure containing monitor and link containers. This
+ * structure is to be attached to a connected dist entry.
+ *
+ * @param[in] nodename Node name as an atom
+ *
+ * @returns Pointer to dist structure
+ *
+ */
+ErtsMonLnkDist *erts_mon_link_dist_create(Eterm nodename);
+
+/**
+ *
+ * @brief Increase reference count of monitor/link dist structure
+ *
+ * @param[in] mld Pointer to dist structure
+ *
+ */
+ERTS_GLB_INLINE void erts_mon_link_dist_inc_refc(ErtsMonLnkDist *mld);
+
+/**
+ *
+ * @brief Decrease reference count of monitor/link dist structure
+ *
+ * @param[in] mld Pointer to dist structure
+ *
+ */
+ERTS_GLB_INLINE void erts_mon_link_dist_dec_refc(ErtsMonLnkDist *mld);
+
+/* internal functions... */
+ERTS_GLB_INLINE void erts_ml_dl_list_insert__(ErtsMonLnkNode **list,
+ ErtsMonLnkNode *ml);
+ERTS_GLB_INLINE void erts_ml_dl_list_delete__(ErtsMonLnkNode **list,
+ ErtsMonLnkNode *ml);
+ERTS_GLB_INLINE ErtsMonLnkNode *erts_ml_dl_list_first__(ErtsMonLnkNode *list);
+ERTS_GLB_INLINE ErtsMonLnkNode *erts_ml_dl_list_last__(ErtsMonLnkNode *list);
+void erts_mon_link_dist_destroy__(ErtsMonLnkDist *mld);
+ERTS_GLB_INLINE void *erts_ml_node_to_main_struct__(ErtsMonLnkNode *mln);
+
+/* implementations for globally inlined misc functions... */
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_mon_link_dist_inc_refc(ErtsMonLnkDist *mld)
+{
+ ERTS_ML_ASSERT(erts_atomic_read_nob(&mld->refc) > 0);
+ erts_atomic_inc_nob(&mld->refc);
+}
+
+ERTS_GLB_INLINE void
+erts_mon_link_dist_dec_refc(ErtsMonLnkDist *mld)
+{
+ ERTS_ML_ASSERT(erts_atomic_read_nob(&mld->refc) > 0);
+ if (erts_atomic_dec_read_nob(&mld->refc) == 0)
+ erts_mon_link_dist_destroy__(mld);
+}
+
+ERTS_GLB_INLINE void *
+erts_ml_node_to_main_struct__(ErtsMonLnkNode *mln)
+{
+ return (void *) (((char *) mln) - ((size_t) mln->offset));
+}
+
+ERTS_GLB_INLINE void
+erts_ml_dl_list_insert__(ErtsMonLnkNode **list, ErtsMonLnkNode *ml)
+{
+ ErtsMonLnkNode *first = *list;
+ ERTS_ML_ASSERT(!(ml->flags & ERTS_ML_FLG_IN_TABLE));
+ if (!first) {
+ ml->node.list.next = ml->node.list.prev = ml;
+ *list = ml;
+ }
+ else {
+ ERTS_ML_ASSERT(first->node.list.prev->node.list.next == first);
+ ERTS_ML_ASSERT(first->node.list.next->node.list.prev == first);
+ ml->node.list.next = first;
+ ml->node.list.prev = first->node.list.prev;
+ first->node.list.prev = ml;
+ ml->node.list.prev->node.list.next = ml;
+ }
+ ml->flags |= ERTS_ML_FLG_IN_TABLE;
+}
+
+ERTS_GLB_INLINE void
+erts_ml_dl_list_delete__(ErtsMonLnkNode **list, ErtsMonLnkNode *ml)
+{
+ ERTS_ML_ASSERT(ml->flags & ERTS_ML_FLG_IN_TABLE);
+ if (ml->node.list.next == ml) {
+ ERTS_ML_ASSERT(ml->node.list.prev == ml);
+ ERTS_ML_ASSERT(*list == ml);
+
+ *list = NULL;
+ }
+ else {
+ ERTS_ML_ASSERT(ml->node.list.prev->node.list.next == ml);
+ ERTS_ML_ASSERT(ml->node.list.prev != ml);
+ ERTS_ML_ASSERT(ml->node.list.next->node.list.prev == ml);
+ ERTS_ML_ASSERT(ml->node.list.next != ml);
+
+ if (*list == ml)
+ *list = ml->node.list.next;
+ ml->node.list.prev->node.list.next = ml->node.list.next;
+ ml->node.list.next->node.list.prev = ml->node.list.prev;
+ }
+ ml->flags &= ~ERTS_ML_FLG_IN_TABLE;
+}
+
+ERTS_GLB_INLINE ErtsMonLnkNode *
+erts_ml_dl_list_first__(ErtsMonLnkNode *list)
+{
+ return list;
+}
+
+ERTS_GLB_INLINE ErtsMonLnkNode *
+erts_ml_dl_list_last__(ErtsMonLnkNode *list)
+{
+ if (!list)
+ return NULL;
+ return list->node.list.prev;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Monitor Operations *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+typedef struct ErtsMonLnkNode__ ErtsMonitor;
+
+typedef struct {
+ ErtsMonitor origin;
+ ErtsMonitor target;
+ Eterm ref;
+ erts_atomic32_t refc;
+} ErtsMonitorData;
+
+typedef struct {
+ ErtsMonitorData md;
+ ErtsORefThing oref_thing;
+} ErtsMonitorDataHeap;
+
+typedef struct ErtsMonitorDataExtended__ ErtsMonitorDataExtended;
+
+struct ErtsMonitorDataExtended__ {
+ ErtsMonitorData md;
+ union {
+ Eterm name;
+ Uint refc;
+ } u;
+ union {
+ struct erl_off_heap_header *ohhp;
+ ErtsMonitor *node_monitors;
+ } uptr;
+ ErtsMonLnkDist *dist;
+ Eterm heap[1]; /* heap start... */
+};
+
+typedef struct ErtsMonitorSuspend__ ErtsMonitorSuspend;
+
+struct ErtsMonitorSuspend__ {
+ ErtsMonitorData md; /* origin = suspender; target = suspendee */
+ ErtsMonitorSuspend *next;
+ erts_atomic_t state;
+};
+#define ERTS_MSUSPEND_STATE_FLG_ACTIVE ((erts_aint_t) (((Uint) 1) << (sizeof(Uint)*8 - 1)))
+#define ERTS_MSUSPEND_STATE_COUNTER_MASK (~ERTS_MSUSPEND_STATE_FLG_ACTIVE)
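+
+/*
+ * A sketch of how the suspend state word decomposes (assuming 'msp'
+ * points to a valid ErtsMonitorSuspend): the high bit flags an active
+ * suspend while the remaining bits count suspend requests.
+ *
+ *     erts_aint_t state = erts_atomic_read_nob(&msp->state);
+ *     int active = !!(state & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ *     erts_aint_t count = state & ERTS_MSUSPEND_STATE_COUNTER_MASK;
+ */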
+
+/*
+ * --- Monitor tree operations ---
+ */
+
+/**
+ *
+ * @brief Lookup a monitor in a monitor tree
+ *
+ *
+ * @param[in] root Pointer to root of monitor tree
+ *
+ * @param[in] key Key of monitor to lookup
+ *
+ * @returns Pointer to a monitor with the
+ * key 'key', or NULL if no such
+ * monitor was found
+ *
+ */
+ErtsMonitor *erts_monitor_tree_lookup(ErtsMonitor *root, Eterm key);
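+
+/*
+ * Sketch (assuming 'c_p' is the executing process and 'ref' the
+ * reference identifying the monitor, e.g. from a demonitor request):
+ *
+ *     ErtsMonitor *mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
+ *                                                 ref);
+ *     if (mon)
+ *         erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), mon);
+ */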
+
+/**
+ *
+ * @brief Lookup or insert a monitor in a monitor tree
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is not part of any tree or list
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] mon Monitor to insert if no monitor
+ * with the same key already exists
+ *
+ * @returns Pointer to a monitor with the
+ * same key as 'mon'. If no such monitor
+ * was found and 'mon' was inserted,
+ * 'mon' is returned.
+ *
+ */
+ErtsMonitor *erts_monitor_tree_lookup_insert(ErtsMonitor **root,
+ ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Lookup or create a node, nodes, or suspend monitor in a monitor tree.
+ *
+ * Looks up an origin monitor with the key 'target' in the monitor tree.
+ * If it is not found, creates a monitor and returns a pointer to the
+ * origin monitor.
+ *
+ * When the function is called it is assumed that:
+ * - no target monitors with the key 'target' exists in the tree.
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[out] created Pointer to integer. The integer is set to
+ * a non-zero value if no monitor with key
+ * 'target' was found, and a new monitor
+ * was created. If a monitor was found, it
+ * is set to zero.
+ *
+ * @param[in] type ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES,
+ * or ERTS_MON_TYPE_SUSPEND
+ *
+ * @param[in] origin The key of the origin
+ *
+ * @param[in] target The key of the target
+ *
+ */
+ErtsMonitor *erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created,
+ Uint16 type, Eterm origin,
+ Eterm target);
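+
+/*
+ * Sketch of erlang:monitor_node() style usage (assuming 'c_p' is the
+ * executing process and 'nodename' an atom); repeated calls share one
+ * monitor, and one plausible way to count invocations is to bump the
+ * shared 'Refc' field:
+ *
+ *     int created;
+ *     ErtsMonitor *mon =
+ *         erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(c_p),
+ *                                         &created, ERTS_MON_TYPE_NODE,
+ *                                         c_p->common.id, nodename);
+ *     ((ErtsMonitorDataExtended *) erts_monitor_to_data(mon))->u.refc++;
+ */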
+
+/**
+ *
+ * @brief Insert a monitor in a monitor tree
+ *
+ * When the function is called it is assumed that:
+ * - no monitors with the same key as 'mon' exist in the tree
+ * - 'mon' is not part of any list or tree
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] mon Monitor to insert.
+ *
+ */
+void erts_monitor_tree_insert(ErtsMonitor **root, ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Replace a monitor in a monitor tree
+ *
+ * When the function is called it is assumed that:
+ * - 'old' monitor and 'new' monitor have exactly the same key
+ * - 'old' monitor is part of the tree
+ * - 'new' monitor is not part of any tree or list
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] old Monitor to remove from the tree
+ *
+ * @param[in] new Monitor to insert into the tree
+ *
+ */
+void erts_monitor_tree_replace(ErtsMonitor **root, ErtsMonitor *old,
+ ErtsMonitor *new);
+
+/**
+ *
+ * @brief Delete a monitor from a monitor tree
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is part of the tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] mon Monitor to remove from the tree
+ *
+ */
+void erts_monitor_tree_delete(ErtsMonitor **root, ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Call a function for each monitor in a monitor tree
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the tree referred to by 'root'.
+ *
+ * @param[in] root Pointer to root of monitor tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_monitor_tree_foreach(ErtsMonitor *root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Call a function for each monitor in a monitor tree. Yield
+ * if lots of monitors exist.
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the tree referred to by 'root'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call until zero is returned
+ * - no modifications are made on the tree between the first
+ * call and the call that returns zero
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] root Pointer to root of monitor tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. At the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of monitors to process
+ * before yielding.
+ *
+ * @returns Zero when all monitors have been
+ * processed; a non-zero value when more work remains.
+ *
+ */
+int erts_monitor_tree_foreach_yielding(ErtsMonitor *root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
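+
+/*
+ * Driver sketch (with a hypothetical 'count_mon' callback): start with
+ * a NULL state pointer and keep calling, without modifying the tree in
+ * between, until zero is returned:
+ *
+ *     Sint cnt = 0;
+ *     void *ystate = NULL;
+ *     while (erts_monitor_tree_foreach_yielding(root, count_mon, &cnt,
+ *                                               &ystate, 100)) {
+ *         ; // non-zero return value: yield here, then resume
+ *     }
+ */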
+
+/**
+ *
+ * @brief Delete all monitors from a monitor tree and call a function for
+ * each monitor
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the tree referred to by 'root'.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_monitor_tree_foreach_delete(ErtsMonitor **root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Delete all monitors from a monitor tree and call a function for
+ * each monitor
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the tree referred to by 'root'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call until zero is returned
+ * - no modifications are made on the tree between the first
+ * call and the call that returns zero
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of monitor tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. At the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of monitors to process
+ * before yielding.
+ *
+ * @returns Zero when all monitors have been
+ * processed; a non-zero value when more work remains.
+ *
+ */
+int erts_monitor_tree_foreach_delete_yielding(ErtsMonitor **root,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/*
+ * --- Monitor list operations ---
+ */
+
+/**
+ *
+ * @brief Insert a monitor in a monitor list
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is not part of any list or tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to monitor list
+ *
+ * @param[in] mon Monitor to insert
+ *
+ */
+ERTS_GLB_INLINE void erts_monitor_list_insert(ErtsMonitor **list, ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Delete a monitor from a monitor list
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is part of the list
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to monitor list
+ *
+ * @param[in] mon Monitor to remove from the list
+ *
+ */
+ERTS_GLB_INLINE void erts_monitor_list_delete(ErtsMonitor **list, ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Get a pointer to first monitor in a monitor list
+ *
+ * The monitor will still remain in the list after the return
+ *
+ * @param[in] list Pointer to monitor list
+ *
+ * @returns Pointer to first monitor in list if
+ * list is not empty. If list is empty
+ * NULL is returned.
+ *
+ */
+ERTS_GLB_INLINE ErtsMonitor *erts_monitor_list_first(ErtsMonitor *list);
+
+/**
+ *
+ * @brief Get a pointer to last monitor in a monitor list
+ *
+ * The monitor will still remain in the list after the return
+ *
+ * @param[in] list Pointer to monitor list
+ *
+ * @returns Pointer to last monitor in list if
+ * list is not empty. If list is empty
+ * NULL is returned.
+ *
+ */
+ERTS_GLB_INLINE ErtsMonitor *erts_monitor_list_last(ErtsMonitor *list);
+
+/**
+ *
+ * @brief Call a function for each monitor in a monitor list
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the list referred to by 'list'.
+ *
+ * @param[in] list Pointer to monitor list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_monitor_list_foreach(ErtsMonitor *list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Call a function for each monitor in a monitor list. Yield
+ * if lots of monitors exist.
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the list referred to by 'list'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call until zero is returned
+ * - no modifications are made on the list between the first
+ * call and the call that returns zero
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] list Pointer to monitor list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. At the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of monitors to process
+ * before yielding.
+ *
+ * @returns Zero when all monitors have been
+ * processed; a non-zero value when more work remains.
+ *
+ */
+int erts_monitor_list_foreach_yielding(ErtsMonitor *list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/**
+ *
+ * @brief Delete all monitors from a monitor list and call a function for
+ * each monitor
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the list referred to by 'list'.
+ *
+ * @param[in,out] list Pointer to pointer to monitor list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_monitor_list_foreach_delete(ErtsMonitor **list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Delete all monitors from a monitor list and call a function for
+ * each monitor
+ *
+ * The function 'func' will be called with a pointer to a monitor
+ * as first argument and 'arg' as second argument for each monitor
+ * in the list referred to by 'list'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call until zero is returned
+ * - no modifications are made on the list between the first
+ * call and the call that returns zero
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to monitor list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. At the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of monitors to process
+ * before yielding.
+ *
+ * @returns Zero when all monitors have been
+ * processed; a non-zero value when more work remains.
+ *
+ */
+int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
+ void (*func)(ErtsMonitor *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/*
+ * --- Misc monitor operations ---
+ */
+
+/**
+ *
+ * @brief Create a monitor
+ *
+ * Can create all types of monitors
+ *
+ * When the function is called it is assumed that:
+ * - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC,
+ * ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE
+ * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or
+ * ERTS_MON_TYPE_SUSPEND
+ * - 'ref' is an ordinary internal reference or an external reference if
+ * type is ERTS_MON_TYPE_DIST_PROC
+ * - 'name' is an atom or NIL if type is ERTS_MON_TYPE_PROC,
+ * ERTS_MON_TYPE_PORT, or ERTS_MON_TYPE_DIST_PROC
+ * - 'name' is NIL if type is ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_RESOURCE,
+ * ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] type ERTS_MON_TYPE_PROC, ERTS_MON_TYPE_PORT,
+ * ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_DIST_PROC,
+ * ERTS_MON_TYPE_RESOURCE, ERTS_MON_TYPE_NODE,
+ * ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
+ *
+ * @param[in] ref A reference or NIL depending on type
+ *
+ * @param[in] origin The key of the origin
+ *
+ * @param[in] target The key of the target
+ *
+ * @param[in] name An atom (the name) or NIL depending on type
+ *
+ * @returns A pointer to monitor data structure
+ *
+ */
+ErtsMonitorData *erts_monitor_create(Uint16 type, Eterm ref, Eterm origin,
+ Eterm target, Eterm name);
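+
+/*
+ * Illustrative sketch (not part of the original header): creating the
+ * origin/target pair for a local process monitor. 'mref', 'watcher'
+ * and 'watched' are assumed to be a fresh internal ordinary reference
+ * and two process identifiers:
+ *
+ *     ErtsMonitorData *mdp = erts_monitor_create(ERTS_MON_TYPE_PROC,
+ *                                                mref, watcher,
+ *                                                watched, NIL);
+ *
+ * The 'mdp->origin' part is then typically inserted into the monitor
+ * tree of the watching process and 'mdp->target' into the monitor
+ * tree of the watched process.
+ */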
+
+/**
+ *
+ * @brief Get pointer to monitor data structure
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ * @returns Pointer to monitor data structure
+ *
+ */
+ERTS_GLB_INLINE ErtsMonitorData *erts_monitor_to_data(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Check if monitor is a target monitor
+ *
+ * @param[in] mon Pointer to monitor to check
+ *
+ * @returns A non-zero value if target monitor;
+ * otherwise zero
+ *
+ */
+ERTS_GLB_INLINE int erts_monitor_is_target(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Check if monitor is an origin monitor
+ *
+ * @param[in] mon Pointer to monitor to check
+ *
+ * @returns A non-zero value if origin monitor;
+ * otherwise zero
+ *
+ */
+ERTS_GLB_INLINE int erts_monitor_is_origin(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Check if monitor is in tree or list
+ *
+ * @param[in] mon Pointer to monitor to check
+ *
+ * @returns A non-zero value if in tree or list;
+ * otherwise zero
+ *
+ */
+ERTS_GLB_INLINE int erts_monitor_is_in_table(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Release monitor
+ *
+ * When both the origin and the target part of the monitor have
+ * been released the monitor structure will be deallocated.
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is not part of any list or tree
+ * - 'mon' is not referred to by any other structures
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ */
+ERTS_GLB_INLINE void erts_monitor_release(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Release both target and origin monitor structures simultaneously
+ *
+ * Release both the origin and target parts of the monitor
+ * simultaneously and deallocate the structure.
+ *
+ * When the function is called it is assumed that:
+ * - Neither the origin part nor the target part of the monitor
+ * is part of any list or tree
+ * - Neither the origin part nor the target part of the monitor
+ * is referred to by any other structures
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] mdp Pointer to monitor data structure
+ *
+ */
+ERTS_GLB_INLINE void erts_monitor_release_both(ErtsMonitorData *mdp);
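+
+/*
+ * Note (illustrative, not part of the original header): each half of
+ * a monitor holds one reference to the shared ErtsMonitorData, which
+ * is deallocated when the last reference is dropped. Assuming neither
+ * half is in a table,
+ *
+ *     erts_monitor_release(&mdp->origin);
+ *     erts_monitor_release(&mdp->target);
+ *
+ * is equivalent to
+ *
+ *     erts_monitor_release_both(mdp);
+ */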
+
+/**
+ *
+ * @brief Insert monitor in dist monitor tree or list
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor is not part of any list or tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ * @param[in] dist Pointer to dist structure
+ *
+ * @returns A non-zero value if inserted;
+ * otherwise, zero. The monitor
+ * is not inserted if the dist
+ * structure has been set in a
+ * dead state.
+ *
+ */
+ERTS_GLB_INLINE int erts_monitor_dist_insert(ErtsMonitor *mon, ErtsMonLnkDist *dist);
+
+/**
+ *
+ * @brief Delete monitor from dist monitor tree or list
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' monitor has earlier been inserted into 'dist'
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ * @param[in] dist Pointer to dist structure
+ *
+ * @returns A non-zero value if deleted;
+ * otherwise, zero. The monitor
+ * is not deleted if the dist
+ * structure has been set in a
+ * dead state or if it has already
+ * been deleted.
+ *
+ */
+ERTS_GLB_INLINE int erts_monitor_dist_delete(ErtsMonitor *mon);
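+
+/*
+ * Illustrative sketch (not part of the original header): how a failed
+ * dist insert is typically handled:
+ *
+ *     if (!erts_monitor_dist_insert(mon, dist)) {
+ *         handle_dead_connection(mon);
+ *     }
+ *
+ * where 'handle_dead_connection' is a hypothetical caller-supplied
+ * action, e.g. delivering the corresponding 'DOWN' signal locally.
+ */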
+
+/**
+ *
+ * @brief Set dead dist structure on monitor
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ * @param[in] nodename Name of remote node
+ *
+ */
+void
+erts_monitor_set_dead_dist(ErtsMonitor *mon, Eterm nodename);
+
+/**
+ *
+ * @brief Get charged size of monitor
+ *
+ * If the other side of the monitor has been released, the
+ * whole size of the monitor data structure is returned; otherwise,
+ * half of the size is returned.
+ *
+ * When the function is called it is assumed that:
+ * - 'mon' has not been released
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] mon Pointer to monitor
+ *
+ * @returns Charged size in bytes
+ *
+ */
+Uint erts_monitor_size(ErtsMonitor *mon);
+
+
+/* internal function... */
+void erts_monitor_destroy__(ErtsMonitorData *mdp);
+
+/* implementations for globally inlined monitor functions... */
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_monitor_is_target(ErtsMonitor *mon)
+{
+ return !!(mon->flags & ERTS_ML_FLG_TARGET);
+}
+
+ERTS_GLB_INLINE int
+erts_monitor_is_origin(ErtsMonitor *mon)
+{
+ return !(mon->flags & ERTS_ML_FLG_TARGET);
+}
+
+ERTS_GLB_INLINE int
+erts_monitor_is_in_table(ErtsMonitor *mon)
+{
+ return !!(mon->flags & ERTS_ML_FLG_IN_TABLE);
+}
+
+ERTS_GLB_INLINE void
+erts_monitor_list_insert(ErtsMonitor **list, ErtsMonitor *mon)
+{
+ erts_ml_dl_list_insert__((ErtsMonLnkNode **) list, (ErtsMonLnkNode *) mon);
+}
+
+ERTS_GLB_INLINE void
+erts_monitor_list_delete(ErtsMonitor **list, ErtsMonitor *mon)
+{
+ erts_ml_dl_list_delete__((ErtsMonLnkNode **) list, (ErtsMonLnkNode *) mon);
+}
+
+ERTS_GLB_INLINE ErtsMonitor *
+erts_monitor_list_first(ErtsMonitor *list)
+{
+ return (ErtsMonitor *) erts_ml_dl_list_first__((ErtsMonLnkNode *) list);
+}
+
+ERTS_GLB_INLINE ErtsMonitor *
+erts_monitor_list_last(ErtsMonitor *list)
+{
+ return (ErtsMonitor *) erts_ml_dl_list_last__((ErtsMonLnkNode *) list);
+}
+
+#ifdef ERTS_ML_DEBUG
+extern size_t erts_monitor_origin_offset;
+extern size_t erts_monitor_origin_key_offset;
+extern size_t erts_monitor_target_offset;
+extern size_t erts_monitor_target_key_offset;
+extern size_t erts_monitor_node_key_offset;
+#endif
+
+ERTS_GLB_INLINE ErtsMonitorData *
+erts_monitor_to_data(ErtsMonitor *mon)
+{
+ ErtsMonitorData *mdp = erts_ml_node_to_main_struct__((ErtsMonLnkNode *) mon);
+
+#ifdef ERTS_ML_DEBUG
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_TARGET));
+ ERTS_ML_ASSERT(erts_monitor_origin_offset == (size_t) mdp->origin.offset);
+ ERTS_ML_ASSERT(!!(mdp->target.flags & ERTS_ML_FLG_TARGET));
+ ERTS_ML_ASSERT(erts_monitor_target_offset == (size_t) mdp->target.offset);
+ if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES
+ || mon->type == ERTS_MON_TYPE_SUSPEND) {
+ ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->origin.key_offset);
+ ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->target.key_offset);
+ }
+ else {
+ ERTS_ML_ASSERT(erts_monitor_origin_key_offset == (size_t) mdp->origin.key_offset);
+ ERTS_ML_ASSERT(erts_monitor_target_key_offset == (size_t) mdp->target.key_offset);
+ }
+#endif
+
+ return mdp;
+}
+
+ERTS_GLB_INLINE void
+erts_monitor_release(ErtsMonitor *mon)
+{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) > 0);
+
+ if (erts_atomic32_dec_read_nob(&mdp->refc) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
+ erts_monitor_destroy__(mdp);
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_monitor_release_both(ErtsMonitorData *mdp)
+{
+ ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
+ == (mdp->target.flags & ERTS_ML_FLGS_SAME));
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&mdp->refc) >= 2);
+
+ if (erts_atomic32_add_read_nob(&mdp->refc, (erts_aint32_t) -2) == 0) {
+ ERTS_ML_ASSERT(!(mdp->origin.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
+
+ erts_monitor_destroy__(mdp);
+ }
+}
+
+ERTS_GLB_INLINE int
+erts_monitor_dist_insert(ErtsMonitor *mon, ErtsMonLnkDist *dist)
+{
+ ErtsMonitorDataExtended *mdep;
+ int insert;
+
+ ERTS_ML_ASSERT(mon->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(mon->type == ERTS_MON_TYPE_DIST_PROC
+ || mon->type == ERTS_MON_TYPE_NODE);
+
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+
+ ERTS_ML_ASSERT(!mdep->dist);
+ ERTS_ML_ASSERT(dist);
+ mdep->dist = dist;
+
+ erts_mon_link_dist_inc_refc(dist);
+
+ erts_mtx_lock(&dist->mtx);
+
+ insert = dist->alive;
+ if (insert) {
+ if ((mon->flags & (ERTS_ML_FLG_NAME
+ | ERTS_ML_FLG_TARGET)) == ERTS_ML_FLG_NAME)
+ erts_monitor_tree_insert(&dist->orig_name_monitors, mon);
+ else
+ erts_monitor_list_insert(&dist->monitors, mon);
+ }
+
+ erts_mtx_unlock(&dist->mtx);
+
+ return insert;
+}
+
+ERTS_GLB_INLINE int
+erts_monitor_dist_delete(ErtsMonitor *mon)
+{
+ ErtsMonitorDataExtended *mdep;
+ ErtsMonLnkDist *dist;
+ Uint16 flags;
+ int delete;
+
+ ERTS_ML_ASSERT(mon->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(mon->type == ERTS_MON_TYPE_DIST_PROC
+ || mon->type == ERTS_MON_TYPE_NODE);
+
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ dist = mdep->dist;
+ ERTS_ML_ASSERT(dist);
+
+ erts_mtx_lock(&dist->mtx);
+
+ flags = mon->flags;
+ delete = !!dist->alive & !!(flags & ERTS_ML_FLG_IN_TABLE);
+ if (delete) {
+ if ((flags & (ERTS_ML_FLG_NAME
+ | ERTS_ML_FLG_TARGET)) == ERTS_ML_FLG_NAME)
+ erts_monitor_tree_delete(&dist->orig_name_monitors, mon);
+ else
+ erts_monitor_list_delete(&dist->monitors, mon);
+ }
+
+ erts_mtx_unlock(&dist->mtx);
+
+ return delete;
+}
+
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* suspend monitors... */
+ErtsMonitorSuspend *erts_monitor_suspend_create(Eterm pid);
+ErtsMonitorSuspend *erts_monitor_suspend_tree_lookup_create(ErtsMonitor **root,
+ int *created,
+ Eterm pid);
+void erts_monitor_suspend_destroy(ErtsMonitorSuspend *msp);
+
+ERTS_GLB_INLINE ErtsMonitorSuspend *erts_monitor_suspend(ErtsMonitor *mon);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsMonitorSuspend *erts_monitor_suspend(ErtsMonitor *mon)
+{
+ ERTS_ML_ASSERT(!mon || mon->type == ERTS_MON_TYPE_SUSPEND);
+ return (ErtsMonitorSuspend *) mon;
+}
+
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Link Operations *
+\* */
+
+typedef struct ErtsMonLnkNode__ ErtsLink;
+
+typedef struct {
+ ErtsLink a;
+ ErtsLink b;
+ erts_atomic32_t refc;
+} ErtsLinkData;
+
+typedef struct {
+ ErtsLinkData ld;
+ struct erl_off_heap_header *ohhp;
+ ErtsMonLnkDist *dist;
+ Eterm heap[1]; /* heap start... */
+} ErtsLinkDataExtended;
+
+/*
+ * --- Link tree operations ---
+ */
+
+/**
+ *
+ * @brief Lookup a link in a link tree
+ *
+ *
+ * @param[in] root Pointer to root of link tree
+ *
+ * @param[in] item Key of link to look up
+ *
+ * @returns Pointer to a link with the
+ * key 'item', or NULL if no such
+ * link was found
+ *
+ */
+ErtsLink *erts_link_tree_lookup(ErtsLink *root, Eterm item);
+
+/**
+ *
+ * @brief Lookup or insert a link in a link tree
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is not part of any tree or list
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] lnk Link to insert if no link
+ * with the same key already exists
+ *
+ * @returns Pointer to a link with the
+ * same key as 'lnk' if such a link
+ * was found in the tree; otherwise,
+ * NULL. If NULL is returned, 'lnk'
+ * was inserted into the tree.
+ *
+ */
+ErtsLink *erts_link_tree_lookup_insert(ErtsLink **root, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Lookup or create a link in a link tree.
+ *
+ * Looks up a link with the key 'other' in the link tree. If it is not
+ * found, creates and inserts a link with the key 'other'.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[out] created Pointer to integer. The integer is set to
+ * a non-zero value if no link with key
+ * 'other' was found, and a new link
+ * was created. If a link was found, it
+ * is set to zero.
+ *
+ * @param[in] type Type of link
+ *
+ * @param[in] this Id of this entity
+ *
+ * @param[in] other Id of other entity
+ *
+ * @returns Pointer to the link found or created
+ *
+ */
+ErtsLink *erts_link_tree_lookup_create(ErtsLink **root, int *created,
+ Uint16 type, Eterm this, Eterm other);
+
+/**
+ *
+ * @brief Insert a link in a link tree
+ *
+ * When the function is called it is assumed that:
+ * - no link with the same key as 'lnk' exists in the tree
+ * - 'lnk' is not part of any list or tree
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] lnk Link to insert.
+ *
+ */
+void erts_link_tree_insert(ErtsLink **root, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Replace a link in a link tree
+ *
+ * When the function is called it is assumed that:
+ * - 'old' link and 'new' link have exactly the same key
+ * - 'old' link is part of the tree
+ * - 'new' link is not part of any tree or list
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] old Link to remove from the tree
+ *
+ * @param[in] new Link to insert into the tree
+ *
+ */
+void erts_link_tree_replace(ErtsLink **root, ErtsLink *old, ErtsLink *new);
+
+/**
+ *
+ * @brief Replace a link in a link tree if the key already exists, based on address
+ *
+ * Inserts the link 'lnk' in the tree if no link with the same key
+ * already exists in tree. If a link with the same key exists in
+ * the tree and 'lnk' has a lower address than the link in the
+ * tree, the existing link in the tree is replaced by 'lnk'.
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is not part of any tree or list
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] lnk Link to insert into the tree
+ *
+ * @returns A pointer to the link that is not part
+ * of the tree after this operation.
+ *
+ */
+ERTS_GLB_INLINE ErtsLink *erts_link_tree_insert_addr_replace(ErtsLink **root,
+ ErtsLink *lnk);
+
+/**
+ *
+ * @brief Delete a link from a link tree
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is part of the tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] lnk Link to remove from the tree
+ *
+ */
+void erts_link_tree_delete(ErtsLink **root, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Delete a link from a link tree based on key
+ *
+ * If link 'lnk' is in the tree, 'lnk' is deleted from the tree.
+ * If link 'lnk' is not in the tree, another link with the same
+ * key as 'lnk' is deleted from the tree if such a link exists.
+ *
+ * When the function is called it is assumed that:
+ * - if 'lnk' link is part of a tree or list, it is part of this tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] lnk Link to remove from the tree
+ *
+ * @returns A pointer to the link that was deleted
+ * from the tree, or NULL in case no link
+ * was deleted from the tree
+ *
+ */
+ERTS_GLB_INLINE ErtsLink *erts_link_tree_key_delete(ErtsLink **root, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Call a function for each link in a link tree
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the tree referred to by 'root'.
+ *
+ * @param[in] root Pointer to root of link tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_link_tree_foreach(ErtsLink *root,
+ void (*func)(ErtsLink *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Call a function for each link in a link tree. Yield if lots
+ * of links exist.
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the tree referred to by 'root'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call, until a non-zero
+ * value is returned.
+ * - no modifications are made on the tree between the first call
+ * and the call that returns a non-zero value
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] root Pointer to root of link tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. On the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of links to process
+ * before yielding.
+ *
+ * @returns A non-zero value when all links have been
+ * processed, and zero when more work is needed.
+ *
+ */
+int erts_link_tree_foreach_yielding(ErtsLink *root,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/**
+ *
+ * @brief Delete all links from a link tree and call a function for
+ * each link
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the tree referred to by 'root'.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_link_tree_foreach_delete(ErtsLink **root,
+ void (*func)(ErtsLink *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Delete all links from a link tree and call a function for
+ * each link
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the tree referred to by 'root'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call, until a non-zero
+ * value is returned.
+ * - no modifications are made on the tree between the first call
+ * and the call that returns a non-zero value
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] root Pointer to pointer to root of link tree
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. On the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of links to process
+ * before yielding.
+ *
+ * @returns A non-zero value when all links have been
+ * processed, and zero when more work is needed.
+ *
+ */
+int erts_link_tree_foreach_delete_yielding(ErtsLink **root,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/*
+ * --- Link list operations ---
+ */
+
+/**
+ *
+ * @brief Insert a link in a link list
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is not part of any list or tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to link list
+ *
+ * @param[in] lnk Link to insert
+ *
+ */
+ERTS_GLB_INLINE void erts_link_list_insert(ErtsLink **list, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Delete a link from a link list
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is part of the list
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to link list
+ *
+ * @param[in] lnk Link to remove from the list
+ *
+ */
+ERTS_GLB_INLINE void erts_link_list_delete(ErtsLink **list, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Get a pointer to first link in a link list
+ *
+ * The link remains in the list after the call returns
+ *
+ * @param[in] list Pointer to link list
+ *
+ * @returns Pointer to first link in list if
+ * list is not empty. If list is empty
+ * NULL is returned.
+ *
+ */
+ERTS_GLB_INLINE ErtsLink *erts_link_list_first(ErtsLink *list);
+
+/**
+ *
+ * @brief Get a pointer to last link in a link list
+ *
+ * The link remains in the list after the call returns
+ *
+ * @param[in] list Pointer to link list
+ *
+ * @returns Pointer to last link in list if
+ * list is not empty. If list is empty
+ * NULL is returned.
+ *
+ */
+ERTS_GLB_INLINE ErtsLink *erts_link_list_last(ErtsLink *list);
+
+/**
+ *
+ * @brief Call a function for each link in a link list
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the list referred to by 'list'.
+ *
+ * @param[in] list Pointer to link list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_link_list_foreach(ErtsLink *list,
+ void (*func)(ErtsLink *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Call a function for each link in a link list. Yield
+ * if lots of links exist.
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the list referred to by 'list'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call, until a non-zero
+ * value is returned.
+ * - no modifications are made on the list between the first call
+ * and the call that returns a non-zero value
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] list Pointer to link list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. On the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of links to process
+ * before yielding.
+ *
+ * @returns A non-zero value when all links have been
+ * processed, and zero when more work is needed.
+ *
+ */
+int erts_link_list_foreach_yielding(ErtsLink *list,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/**
+ *
+ * @brief Delete all links from a link list and call a function for
+ * each link
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the list referred to by 'list'.
+ *
+ * @param[in,out] list Pointer to pointer to link list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ */
+void erts_link_list_foreach_delete(ErtsLink **list,
+ void (*func)(ErtsLink *, void *),
+ void *arg);
+
+/**
+ *
+ * @brief Delete all links from a link list and call a function for
+ * each link
+ *
+ * The function 'func' will be called with a pointer to a link
+ * as first argument and 'arg' as second argument for each link
+ * in the list referred to by 'list'.
+ *
+ * It is assumed that:
+ * - *vyspp equals NULL on the first call
+ * - this function is repeatedly called with *vyspp left
+ * as set by the previous call, until a non-zero
+ * value is returned.
+ * - no modifications are made on the list between the first
+ * call and the call that returns a non-zero value
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in,out] list Pointer to pointer to link list
+ *
+ * @param[in] func Pointer to function to call
+ *
+ * @param[in] arg Argument to pass as second argument in
+ * calls to 'func'
+ *
+ * @param[in,out] vyspp Pointer to a pointer to an internal state
+ * used by this function. On the initial call
+ * *vyspp should be NULL. When done *vyspp
+ * will be NULL.
+ *
+ * @param[in] limit Maximum number of links to process
+ * before yielding.
+ *
+ * @returns A non-zero value when all links have been
+ * processed, and zero when more work is needed.
+ *
+ */
+int erts_link_list_foreach_delete_yielding(ErtsLink **list,
+ void (*func)(ErtsLink *, void *),
+ void *arg,
+ void **vyspp,
+ Sint limit);
+
+/*
+ * --- Misc link operations ---
+ */
+
+/**
+ *
+ * @brief Create a link
+ *
+ * Can create all types of links
+ *
+ * When the function is called it is assumed that:
+ * - 'a' and 'b' are valid keys identifying the two linked entities
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] type ERTS_LNK_TYPE_PROC, ERTS_LNK_TYPE_PORT,
+ * or ERTS_LNK_TYPE_DIST_PROC
+ *
+ * @param[in] a The key of entity a. Link structure a will
+ * have field other.item set to 'b'.
+ *
+ * @param[in] b The key of entity b. Link structure b will
+ * have field other.item set to 'a'.
+ *
+ * @returns Pointer to link data structure
+ *
+ */
+ErtsLinkData *erts_link_create(Uint16 type, Eterm a, Eterm b);
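+
+/*
+ * Illustrative sketch (not part of the original header): creating
+ * both halves of a local process link, where 'pid_x' and 'pid_y' are
+ * assumed process identifiers:
+ *
+ *     ErtsLinkData *ldp = erts_link_create(ERTS_LNK_TYPE_PROC,
+ *                                          pid_x, pid_y);
+ *
+ * 'ldp->a' then has other.item set to 'pid_y' and is kept by entity
+ * a, while 'ldp->b' has other.item set to 'pid_x' and is kept by
+ * entity b.
+ */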
+
+/**
+ *
+ * @brief Get pointer to link data structure
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @returns Pointer to link data structure
+ *
+ */
+ERTS_GLB_INLINE ErtsLinkData *erts_link_to_data(ErtsLink *lnk);
+
+/**
+ *
+ * @brief Get pointer to the other link structure part of the link
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @param[out] ldpp Pointer to pointer to link data structure,
+ * if a non-NULL value is passed in the call
+ *
+ * @returns Pointer to other link
+ *
+ */
+ERTS_GLB_INLINE ErtsLink *erts_link_to_other(ErtsLink *lnk, ErtsLinkData **ldpp);
+
+/**
+ *
+ * @brief Check if link is in tree or list
+ *
+ * @param[in] lnk Pointer to link to check
+ *
+ * @returns A non-zero value if in tree or list;
+ * otherwise zero
+ *
+ */
+ERTS_GLB_INLINE int erts_link_is_in_table(ErtsLink *lnk);
+
+/**
+ *
+ * @brief Release link
+ *
+ * When both halves of the link have been released, the link
+ * structure will be deallocated.
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is not part of any list or tree
+ * - 'lnk' is not referred to by any other structures
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] lnk Pointer to link
+ *
+ */
+ERTS_GLB_INLINE void erts_link_release(ErtsLink *lnk);
+
+/**
+ *
+ * @brief Release both link halves of a link simultaneously
+ *
+ * Release both halves of a link simultaneously and deallocate
+ * the structure.
+ *
+ * When the function is called it is assumed that:
+ * - Neither of the parts of the link is part of any list or tree
+ * - Neither of the parts of the link nor the link data structure
+ * is referred to by any other structures
+ * If the above are not true, bad things will happen.
+ *
+ * @param[in] ldp Pointer to link data structure
+ *
+ */
+ERTS_GLB_INLINE void erts_link_release_both(ErtsLinkData *ldp);
+
+/**
+ *
+ * @brief Insert link in dist link list
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link is not part of any list or tree
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @param[in] dist Pointer to dist structure
+ *
+ * @returns A non-zero value if inserted;
+ * otherwise, zero. The link
+ * is not inserted if the dist
+ * structure has been set in a
+ * dead state.
+ *
+ */
+ERTS_GLB_INLINE int erts_link_dist_insert(ErtsLink *lnk, ErtsMonLnkDist *dist);
+
+/**
+ *
+ * @brief Delete link from dist link list
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' link has earlier been inserted into 'dist'
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @param[in] dist Pointer to dist structure
+ *
+ * @returns A positive value if deleted;
+ * zero if not deleted (the dist
+ * structure has been set in a
+ * dead state or the link has
+ * already been deleted); or -1
+ * if no dist structure is
+ * attached to the link
+ *
+ */
+ERTS_GLB_INLINE int erts_link_dist_delete(ErtsLink *lnk);
+
+/**
+ *
+ * @brief Set dead dist structure on link
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @param[in] nodename Name of remote node
+ *
+ */
+void
+erts_link_set_dead_dist(ErtsLink *lnk, Eterm nodename);
+
+/**
+ *
+ * @brief Get charged size of link
+ *
+ * If the other side of the link has been released, the
+ * whole size of the link data structure is returned; otherwise,
+ * half of the size is returned.
+ *
+ * When the function is called it is assumed that:
+ * - 'lnk' has not been released
+ * If the above is not true, bad things will happen.
+ *
+ * @param[in] lnk Pointer to link
+ *
+ * @returns Charged size in bytes
+ *
+ */
+Uint erts_link_size(ErtsLink *lnk);
+
+/* internal function... */
+void erts_link_destroy__(ErtsLinkData *ldp);
+
+/* implementations for globally inlined link functions... */
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_ML_DEBUG
+extern size_t erts_link_a_offset;
+extern size_t erts_link_b_offset;
+extern size_t erts_link_key_offset;
+#endif
+
+ERTS_GLB_INLINE ErtsLinkData *
+erts_link_to_data(ErtsLink *lnk)
+{
+ ErtsLinkData *ldp = erts_ml_node_to_main_struct__((ErtsMonLnkNode *) lnk);
+
+#ifdef ERTS_ML_DEBUG
+ ERTS_ML_ASSERT(erts_link_a_offset == (size_t) ldp->a.offset);
+ ERTS_ML_ASSERT(erts_link_key_offset == (size_t) ldp->a.key_offset);
+ ERTS_ML_ASSERT(erts_link_b_offset == (size_t) ldp->b.offset);
+ ERTS_ML_ASSERT(erts_link_key_offset == (size_t) ldp->b.key_offset);
+#endif
+
+ return ldp;
+}
+
+ERTS_GLB_INLINE ErtsLink *
+erts_link_to_other(ErtsLink *lnk, ErtsLinkData **ldpp)
+{
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+ if (ldpp)
+ *ldpp = ldp;
+ return lnk == &ldp->a ? &ldp->b : &ldp->a;
+}
+
+ERTS_GLB_INLINE int
+erts_link_is_in_table(ErtsLink *lnk)
+{
+ return !!(lnk->flags & ERTS_ML_FLG_IN_TABLE);
+}
+
+ERTS_GLB_INLINE void
+erts_link_list_insert(ErtsLink **list, ErtsLink *lnk)
+{
+ erts_ml_dl_list_insert__((ErtsMonLnkNode **) list, (ErtsMonLnkNode *) lnk);
+}
+
+ERTS_GLB_INLINE void
+erts_link_list_delete(ErtsLink **list, ErtsLink *lnk)
+{
+ erts_ml_dl_list_delete__((ErtsMonLnkNode **) list, (ErtsMonLnkNode *) lnk);
+}
+
+ERTS_GLB_INLINE ErtsLink *
+erts_link_list_first(ErtsLink *list)
+{
+ return (ErtsLink *) erts_ml_dl_list_first__((ErtsMonLnkNode *) list);
+}
+
+ERTS_GLB_INLINE ErtsLink *
+erts_link_list_last(ErtsLink *list)
+{
+ return (ErtsLink *) erts_ml_dl_list_last__((ErtsMonLnkNode *) list);
+}
+
+ERTS_GLB_INLINE void
+erts_link_release(ErtsLink *lnk)
+{
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+ ERTS_ML_ASSERT(!(lnk->flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&ldp->refc) > 0);
+ if (erts_atomic32_dec_read_nob(&ldp->refc) == 0)
+ erts_link_destroy__(ldp);
+}
+
+ERTS_GLB_INLINE void
+erts_link_release_both(ErtsLinkData *ldp)
+{
+ ERTS_ML_ASSERT(!(ldp->a.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(!(ldp->b.flags & ERTS_ML_FLG_IN_TABLE));
+ ERTS_ML_ASSERT(erts_atomic32_read_nob(&ldp->refc) >= 2);
+ if (erts_atomic32_add_read_nob(&ldp->refc, (erts_aint32_t) -2) == 0)
+ erts_link_destroy__(ldp);
+}
+
+ERTS_GLB_INLINE ErtsLink *
+erts_link_tree_insert_addr_replace(ErtsLink **root, ErtsLink *lnk)
+{
+ ErtsLink *lnk2 = erts_link_tree_lookup_insert(root, lnk);
+ if (!lnk2)
+ return NULL;
+ if (lnk2 < lnk)
+ return lnk;
+ erts_link_tree_replace(root, lnk2, lnk);
+ return lnk2;
+}
+
+ERTS_GLB_INLINE ErtsLink *
+erts_link_tree_key_delete(ErtsLink **root, ErtsLink *lnk)
+{
+ ErtsLink *dlnk;
+ if (erts_link_is_in_table(lnk))
+ dlnk = lnk;
+ else
+ dlnk = erts_link_tree_lookup(*root, lnk->other.item);
+ if (dlnk)
+ erts_link_tree_delete(root, dlnk);
+ return dlnk;
+}
+
+ERTS_GLB_INLINE int
+erts_link_dist_insert(ErtsLink *lnk, ErtsMonLnkDist *dist)
+{
+ ErtsLinkDataExtended *ldep;
+ int insert;
+
+ ERTS_ML_ASSERT(lnk->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+
+ ldep = (ErtsLinkDataExtended *) erts_link_to_data(lnk);
+
+ ERTS_ML_ASSERT(!ldep->dist);
+ ERTS_ML_ASSERT(dist);
+ ldep->dist = dist;
+
+ erts_mon_link_dist_inc_refc(dist);
+
+ erts_mtx_lock(&dist->mtx);
+
+ insert = dist->alive;
+ if (insert)
+ erts_link_list_insert(&dist->links, lnk);
+
+ erts_mtx_unlock(&dist->mtx);
+
+ return insert;
+}
+
+ERTS_GLB_INLINE int
+erts_link_dist_delete(ErtsLink *lnk)
+{
+ ErtsLinkDataExtended *ldep;
+ ErtsMonLnkDist *dist;
+ int delete;
+
+ ERTS_ML_ASSERT(lnk->flags & ERTS_ML_FLG_EXTENDED);
+ ERTS_ML_ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+
+ ldep = (ErtsLinkDataExtended *) erts_link_to_data(lnk);
+ dist = ldep->dist;
+ if (!dist)
+ return -1;
+
+ erts_mtx_lock(&dist->mtx);
+
+ delete = !!dist->alive & !!(lnk->flags & ERTS_ML_FLG_IN_TABLE);
+ if (delete)
+ erts_link_list_delete(&dist->links, lnk);
+
+ erts_mtx_unlock(&dist->mtx);
+
+ return delete;
+}
+
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_MONITOR_LINK_H__ */
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
deleted file mode 100644
index 910598690d..0000000000
--- a/erts/emulator/beam/erl_monitors.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-/**************************************************************************
- * Monitors and links data structure manipulation.
- * Monitors and links are organized as AVL trees with the reference as
- * key in the monitor case and the pid of the linked process as key in the
- * link case. Lookups the order of the references is somewhat special. Local
- * references are strictly smaller than remote references and are sorted
- * by inlined comparision functionality. Remote references are handled by the
- * usual cmp function.
- * Each Monitor is tagged with different tags depending on which end of the
- * monitor it is.
- * A monitor is removed either explicitly by reference or all monitors are
- * removed when the process exits. No need to access the monitor by pid.
- **************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_vm.h"
-#include "global.h"
-#include "erl_process.h"
-#include "error.h"
-#include "erl_db.h"
-#include "bif.h"
-#include "big.h"
-#include "erl_monitors.h"
-
-#define STACK_NEED 50
-#define MAX_MONITORS 0xFFFFFFFFUL
-
-#define DIR_LEFT 0
-#define DIR_RIGHT 1
-#define DIR_END 2
-
-static erts_smp_atomic_t tot_link_lh_size;
-
-/* Implements the sort order in monitor trees, which is different from
- the ordinary term order.
- No short local ref's should ever exist (the ref is created by the bif's
- in runtime), therefore:
- All local ref's are less than external ref's
- Local ref's are inline-compared,
- External ref's are compared by cmp */
-
-#if 0
-#define CMP_MON_REF(Ref1,Ref2) \
-cmp((Ref1),(Ref2)) /* XXX, the inline comparision yet to be done */
-#else
-#define CMP_MON_REF(Ref1,Ref2) cmp_mon_ref((Ref1),(Ref2))
-#endif
-
-static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
-{
- Eterm *b1, *b2;
-
-
- b1 = boxed_val(ref1);
- b2 = boxed_val(ref2);
- if (is_ref_thing_header(*b1)) {
- if (is_ref_thing_header(*b2)) {
- return memcmp(b1+1,b2+1,ERTS_REF_WORDS*sizeof(Uint));
- }
- return -1;
- }
- if (is_ref_thing_header(*b2)) {
- return 1;
- }
- return CMP(ref1,ref2);
-}
-
-#define CP_LINK_VAL(To, Hp, From) \
-do { \
- if (IS_CONST(From)) \
- (To) = (From); \
- else { \
- Uint i__; \
- Uint len__; \
- ASSERT((Hp)); \
- ASSERT(is_internal_ref((From)) || is_external((From))); \
- (To) = make_boxed((Hp)); \
- len__ = thing_arityval(*boxed_val((From))) + 1; \
- for(i__ = 0; i__ < len__; i__++) \
- (*((Hp)++)) = boxed_val((From))[i__]; \
- if (is_external((To))) { \
- external_thing_ptr((To))->next = NULL; \
- erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
- } \
- } \
-} while (0)
-
-static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
-{
- Uint mon_size = ERTS_MONITOR_SIZE;
- ErtsMonitor *n;
- Eterm *hp;
-
- mon_size += NC_HEAP_SIZE(ref);
- if (!IS_CONST(pid)) {
- mon_size += NC_HEAP_SIZE(pid);
- }
-
- if (mon_size <= ERTS_MONITOR_SH_SIZE) {
- n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
- mon_size*sizeof(Uint));
- } else {
- n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
- mon_size*sizeof(Uint));
- erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
- }
- hp = n->heap;
-
-
- n->left = n->right = NULL; /* Always the same initial value*/
- n->type = (Uint16) type;
- n->balance = 0; /* Always the same initial value */
- n->name = name; /* atom() or [] */
- CP_LINK_VAL(n->ref, hp, ref); /*XXX Unneccesary check, never immediate*/
- CP_LINK_VAL(n->pid, hp, pid);
-
- return n;
-}
-
-static ErtsLink *create_link(Uint type, Eterm pid)
-{
- Uint lnk_size = ERTS_LINK_SIZE;
- ErtsLink *n;
- Eterm *hp;
-
- if (!IS_CONST(pid)) {
- lnk_size += NC_HEAP_SIZE(pid);
- }
-
- if (lnk_size <= ERTS_LINK_SH_SIZE) {
- n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_SH,
- lnk_size*sizeof(Uint));
- } else {
- n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
- lnk_size*sizeof(Uint));
- erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
- }
- hp = n->heap;
-
-
- n->left = n->right = NULL; /* Always the same initial value*/
- n->type = (Uint16) type;
- n->balance = 0; /* Always the same initial value */
- if (n->type == LINK_NODE) {
- ERTS_LINK_REFC(n) = 0;
- } else {
- ERTS_LINK_ROOT(n) = NULL;
- }
- CP_LINK_VAL(n->pid, hp, pid);
-
- return n;
-}
-
-#undef CP_LINK_VAL
-
-static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid)
-{
- ErtsSuspendMonitor *smon = erts_alloc(ERTS_ALC_T_SUSPEND_MON,
- sizeof(ErtsSuspendMonitor));
- smon->left = smon->right = NULL; /* Always the same initial value */
- smon->balance = 0; /* Always the same initial value */
- smon->pending = 0;
- smon->active = 0;
- smon->pid = pid;
- return smon;
-}
-
-void
-erts_init_monitors(void)
-{
- erts_smp_atomic_init_nob(&tot_link_lh_size, 0);
-}
-
-Uint
-erts_tot_link_lh_size(void)
-{
- return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size);
-}
-
-void erts_destroy_monitor(ErtsMonitor *mon)
-{
- Uint mon_size = ERTS_MONITOR_SIZE;
- ErlNode *node;
-
- ASSERT(!IS_CONST(mon->ref));
- mon_size += NC_HEAP_SIZE(mon->ref);
- if (is_external(mon->ref)) {
- node = external_thing_ptr(mon->ref)->node;
- erts_deref_node_entry(node);
- }
- if (!IS_CONST(mon->pid)) {
- mon_size += NC_HEAP_SIZE(mon->pid);
- if (is_external(mon->pid)) {
- node = external_thing_ptr(mon->pid)->node;
- erts_deref_node_entry(node);
- }
- }
- if (mon_size <= ERTS_MONITOR_SH_SIZE) {
- erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
- } else {
- erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
- erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
- }
-}
-
-void erts_destroy_link(ErtsLink *lnk)
-{
- Uint lnk_size = ERTS_LINK_SIZE;
- ErlNode *node;
-
- ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);
-
- if (!IS_CONST(lnk->pid)) {
- lnk_size += NC_HEAP_SIZE(lnk->pid);
- if (is_external(lnk->pid)) {
- node = external_thing_ptr(lnk->pid)->node;
- erts_deref_node_entry(node);
- }
- }
- if (lnk_size <= ERTS_LINK_SH_SIZE) {
- erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
- } else {
- erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
- erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
- }
-}
-
-void erts_destroy_suspend_monitor(ErtsSuspendMonitor *smon)
-{
- erts_free(ERTS_ALC_T_SUSPEND_MON, smon);
-}
-
-static void insertion_rotation(int dstack[], int dpos,
- void *tstack[], int tpos,
- int state) {
-
- ErtsMonitorOrLink **this;
- ErtsMonitorOrLink *p1, *p2, *p;
- int dir;
-
- while (state && ( dir = dstack[--dpos] ) != DIR_END) {
- this = tstack[--tpos];
- p = *this;
- if (dir == DIR_LEFT) {
- switch (p->balance) {
- case 1:
- p->balance = 0;
- state = 0;
- break;
- case 0:
- p->balance = -1;
- break;
- case -1: /* The icky case */
- p1 = p->left;
- if (p1->balance == -1) { /* Single LL rotation */
- p->left = p1->right;
- p1->right = p;
- p->balance = 0;
- (*this) = p1;
- } else { /* Double RR rotation */
- p2 = p1->right;
- p1->right = p2->left;
- p2->left = p1;
- p->left = p2->right;
- p2->right = p;
- p->balance = (p2->balance == -1) ? +1 : 0;
- p1->balance = (p2->balance == 1) ? -1 : 0;
- (*this) = p2;
- }
- (*this)->balance = 0;
- state = 0;
- break;
- }
- } else { /* dir == DIR_RIGHT */
- switch (p->balance) {
- case -1:
- p->balance = 0;
- state = 0;
- break;
- case 0:
- p->balance = 1;
- break;
- case 1:
- p1 = p->right;
- if (p1->balance == 1) { /* Single RR rotation */
- p->right = p1->left;
- p1->left = p;
- p->balance = 0;
- (*this) = p1;
- } else { /* Double RL rotation */
- p2 = p1->left;
- p1->left = p2->right;
- p2->right = p1;
- p->right = p2->left;
- p2->left = p;
- p->balance = (p2->balance == 1) ? -1 : 0;
- p1->balance = (p2->balance == -1) ? 1 : 0;
- (*this) = p2;
- }
- (*this)->balance = 0;
- state = 0;
- break;
- }
- }
- }
-}
-
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
- Eterm name)
-{
- void *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsMonitor **this = root;
- Sint c;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Found our place */
- state = 1;
- *this = create_monitor(type,ref,pid,name);
- break;
- } else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
- /* go left */
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key is an error for monitors */
- erts_exit(ERTS_ERROR_EXIT,"Insertion of already present monitor!");
- break;
- }
- }
- insertion_rotation(dstack, dpos, tstack, tpos, state);
-}
-
-
-/* Returns 0 if OK, < 0 if already present */
-int erts_add_link(ErtsLink **root, Uint type, Eterm pid)
-{
- void *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsLink **this = root;
- Sint c;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Found our place */
- state = 1;
- *this = create_link(type,pid);
- break;
- } else if ((c = CMP(pid,(*this)->pid)) < 0) {
- /* go left */
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key is an error for monitors */
- return -1;
- }
- }
- insertion_rotation(dstack, dpos, tstack, tpos, state);
- return 0;
-}
-
-ErtsSuspendMonitor *
-erts_add_or_lookup_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid)
-{
- void *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsSuspendMonitor **this = root;
- ErtsSuspendMonitor *res;
- Sint c;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Found our place */
- state = 1;
- res = *this = create_suspend_monitor(pid);
- break;
- } else if ((c = CMP(pid,(*this)->pid)) < 0) {
- /* go left */
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Already here... */
- ASSERT((*this)->pid == pid);
- return *this;
- }
- }
- insertion_rotation(dstack, dpos, tstack, tpos, state);
- return res;
-}
-
-
-/* Returns the new or old link structure */
-ErtsLink *erts_add_or_lookup_link(ErtsLink **root, Uint type, Eterm pid)
-{
- void *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsLink **this = root;
- Sint c;
- ErtsLink *ret = NULL;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Found our place */
- state = 1;
- *this = create_link(type,pid);
- ret = *this;
- break;
- } else if ((c = CMP(pid,(*this)->pid)) < 0) {
- /* go left */
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key is an error for monitors */
- return *this;
- }
- }
- insertion_rotation(dstack, dpos, tstack, tpos, state);
- return ret;
-}
-
-
-/*
- * Deletion helpers
- */
-static int balance_left(ErtsMonitorOrLink **this)
-{
- ErtsMonitorOrLink *p, *p1, *p2;
- int b1, b2, h = 1;
-
- p = *this;
- switch (p->balance) {
- case -1:
- p->balance = 0;
- break;
- case 0:
- p->balance = 1;
- h = 0;
- break;
- case 1:
- p1 = p->right;
- b1 = p1->balance;
- if (b1 >= 0) { /* Single RR rotation */
- p->right = p1->left;
- p1->left = p;
- if (b1 == 0) {
- p->balance = 1;
- p1->balance = -1;
- h = 0;
- } else {
- p->balance = p1->balance = 0;
- }
- (*this) = p1;
- } else { /* Double RL rotation */
- p2 = p1->left;
- b2 = p2->balance;
- p1->left = p2->right;
- p2->right = p1;
- p->right = p2->left;
- p2->left = p;
- p->balance = (b2 == 1) ? -1 : 0;
- p1->balance = (b2 == -1) ? 1 : 0;
- p2->balance = 0;
- (*this) = p2;
- }
- break;
- }
- return h;
-}
-
-static int balance_right(ErtsMonitorOrLink **this)
-{
- ErtsMonitorOrLink *p, *p1, *p2;
- int b1, b2, h = 1;
-
- p = *this;
- switch (p->balance) {
- case 1:
- p->balance = 0;
- break;
- case 0:
- p->balance = -1;
- h = 0;
- break;
- case -1:
- p1 = p->left;
- b1 = p1->balance;
- if (b1 <= 0) { /* Single LL rotation */
- p->left = p1->right;
- p1->right = p;
- if (b1 == 0) {
- p->balance = -1;
- p1->balance = 1;
- h = 0;
- } else {
- p->balance = p1->balance = 0;
- }
- (*this) = p1;
- } else { /* Double LR rotation */
- p2 = p1->right;
- b2 = p2->balance;
- p1->right = p2->left;
- p2->left = p1;
- p->left = p2->right;
- p2->right = p;
- p->balance = (b2 == -1) ? 1 : 0;
- p1->balance = (b2 == 1) ? -1 : 0;
- p2->balance = 0;
- (*this) = p2;
- }
- }
- return h;
-}
-
-static int delsub(ErtsMonitorOrLink **this)
-{
- ErtsMonitorOrLink **tstack[STACK_NEED];
- int tpos = 0;
- ErtsMonitorOrLink *q = (*this);
- ErtsMonitorOrLink **r = &(q->left);
- int h;
-
- /*
- * Walk down the tree to the right and search
- * for a void right child, pick that child out
- * and return it to be put in the deleted
- * object's place.
- */
-
- while ((*r)->right != NULL) {
- tstack[tpos++] = r;
- r = &((*r)->right);
- }
- *this = *r;
- *r = (*r)->left;
- (*this)->left = q->left;
- (*this)->right = q->right;
- (*this)->balance = q->balance;
- tstack[0] = &((*this)->left);
- h = 1;
- while (tpos && h) {
- r = tstack[--tpos];
- h = balance_right(r);
- }
- return h;
-}
-
-ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref)
-{
- ErtsMonitor **tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsMonitor **this = root;
- Sint c;
- int dir;
- ErtsMonitor *q = NULL;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Failure */
- return NULL;
- } else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key, found the one to delete */
- q = (*this);
- if (q->right == NULL) {
- (*this) = q->left;
- state = 1;
- } else if (q->left == NULL) {
- (*this) = q->right;
- state = 1;
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- state = delsub((ErtsMonitorOrLink **) this);
- }
- break;
- }
- }
- while (state && ( dir = dstack[--dpos] ) != DIR_END) {
- this = tstack[--tpos];
- if (dir == DIR_LEFT) {
- state = balance_left((ErtsMonitorOrLink **) this);
- } else {
- state = balance_right((ErtsMonitorOrLink **) this);
- }
- }
- return q;
-}
-
-ErtsLink *erts_remove_link(ErtsLink **root, Eterm pid)
-{
- ErtsLink **tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsLink **this = root;
- Sint c;
- int dir;
- ErtsLink *q = NULL;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Failure */
- return NULL;
- } else if ((c = CMP(pid,(*this)->pid)) < 0) {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key, found the one to delete */
- q = (*this);
- if (q->right == NULL) {
- (*this) = q->left;
- state = 1;
- } else if (q->left == NULL) {
- (*this) = q->right;
- state = 1;
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- state = delsub((ErtsMonitorOrLink **) this);
- }
- break;
- }
- }
- while (state && ( dir = dstack[--dpos] ) != DIR_END) {
- this = tstack[--tpos];
- if (dir == DIR_LEFT) {
- state = balance_left((ErtsMonitorOrLink **) this);
- } else {
- state = balance_right((ErtsMonitorOrLink **) this);
- }
- }
- return q;
-}
-
-void
-erts_delete_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid)
-{
- ErtsSuspendMonitor **tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int state = 0;
- ErtsSuspendMonitor **this = root;
- Sint c;
- int dir;
- ErtsSuspendMonitor *q = NULL;
-
- dstack[0] = DIR_END;
- for (;;) {
- if (!*this) { /* Nothing found */
- return;
- } else if ((c = CMP(pid,(*this)->pid)) < 0) {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- this = &((*this)->left);
- } else if (c > 0) { /* go right */
- dstack[dpos++] = DIR_RIGHT;
- tstack[tpos++] = this;
- this = &((*this)->right);
- } else { /* Equal key, found the one to delete */
- q = (*this);
- ASSERT(q->pid == pid);
- if (q->right == NULL) {
- (*this) = q->left;
- state = 1;
- } else if (q->left == NULL) {
- (*this) = q->right;
- state = 1;
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = this;
- state = delsub((ErtsMonitorOrLink **) this);
- }
- erts_destroy_suspend_monitor(q);
- break;
- }
- }
- while (state && ( dir = dstack[--dpos] ) != DIR_END) {
- this = tstack[--tpos];
- if (dir == DIR_LEFT) {
- state = balance_left((ErtsMonitorOrLink **) this);
- } else {
- state = balance_right((ErtsMonitorOrLink **) this);
- }
- }
-}
-
-ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref)
-{
- Sint c;
-
- for (;;) {
- if (root == NULL || (c = CMP_MON_REF(ref,root->ref)) == 0) {
- return root;
- } else if (c < 0) {
- root = root->left;
- } else { /* c > 0 */
- root = root->right;
- }
- }
-}
-
-ErtsLink *erts_lookup_link(ErtsLink *root, Eterm pid)
-{
- Sint c;
-
- for (;;) {
- if (root == NULL || (c = CMP(pid,root->pid)) == 0) {
- return root;
- } else if (c < 0) {
- root = root->left;
- } else { /* c > 0 */
- root = root->right;
- }
- }
-}
-
-ErtsSuspendMonitor *
-erts_lookup_suspend_monitor(ErtsSuspendMonitor *root, Eterm pid)
-{
- Sint c;
-
- for (;;) {
- if (root == NULL || (c = CMP(pid,root->pid)) == 0) {
- return root;
- } else if (c < 0) {
- root = root->left;
- } else { /* c > 0 */
- root = root->right;
- }
- }
-}
-
-void erts_sweep_monitors(ErtsMonitor *root,
- void (*doit)(ErtsMonitor *, void *),
- void *context)
-{
- ErtsMonitor *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int dir;
-
- dstack[0] = DIR_END;
-
- for (;;) {
- if (root == NULL) {
- if ((dir = dstack[dpos-1]) == DIR_END) {
- return;
- }
- if (dir == DIR_LEFT) {
- /* Still has DIR_RIGHT to do */
- dstack[dpos-1] = DIR_RIGHT;
- root = (tstack[tpos-1])->right;
- } else {
- /* stacktop is an object to be deleted */
- (*doit)(tstack[--tpos],context); /* expeted to do the
- deletion */
- --dpos;
- root = NULL;
- }
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = root;
- root = root->left;
- }
- }
-}
-
-void erts_sweep_links(ErtsLink *root,
- void (*doit)(ErtsLink *, void *),
- void *context)
-{
- ErtsLink *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int dir;
-
- dstack[0] = DIR_END;
-
- for (;;) {
- if (root == NULL) {
- if ((dir = dstack[dpos-1]) == DIR_END) {
- return;
- }
- if (dir == DIR_LEFT) {
- /* Still has DIR_RIGHT to do */
- dstack[dpos-1] = DIR_RIGHT;
- root = (tstack[tpos-1])->right;
- } else {
- /* stacktop is an object to be deleted */
- (*doit)(tstack[--tpos],context); /* expeted to do the
- deletion */
- --dpos;
- root = NULL;
- }
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = root;
- root = root->left;
- }
- }
-}
-
-void erts_sweep_suspend_monitors(ErtsSuspendMonitor *root,
- void (*doit)(ErtsSuspendMonitor *, void *),
- void *context)
-{
- ErtsSuspendMonitor *tstack[STACK_NEED];
- int tpos = 0;
- int dstack[STACK_NEED+1];
- int dpos = 1;
- int dir;
-
- dstack[0] = DIR_END;
-
- for (;;) {
- if (root == NULL) {
- if ((dir = dstack[dpos-1]) == DIR_END) {
- return;
- }
- if (dir == DIR_LEFT) {
- /* Still has DIR_RIGHT to do */
- dstack[dpos-1] = DIR_RIGHT;
- root = (tstack[tpos-1])->right;
- } else {
- /* stacktop is an object to be deleted */
- (*doit)(tstack[--tpos],context); /* expeted to do the
- deletion */
- --dpos;
- root = NULL;
- }
- } else {
- dstack[dpos++] = DIR_LEFT;
- tstack[tpos++] = root;
- root = root->left;
- }
- }
-}
-
-
-/* Debug BIF, always present, but undocumented... */
-
-static void erts_dump_monitors(ErtsMonitor *root, int indent)
-{
- if (root == NULL)
- return;
- erts_dump_monitors(root->right,indent+2);
- erts_printf("%*s[%b16d:%b16u:%T:%T:%T]\n", indent, "", root->balance,
- root->type, root->ref, root->pid, root->name);
- erts_dump_monitors(root->left,indent+2);
-}
-
-static void erts_dump_links_aux(ErtsLink *root, int indent,
- erts_dsprintf_buf_t *dsbufp)
-{
- if (root == NULL)
- return;
- erts_dump_links_aux(root->right, indent+2, dsbufp);
- dsbufp->str_len = 0;
- erts_dsprintf(dsbufp, "%*s[%b16d:%b16u:%T:%p]", indent, "",
- root->balance, root->type, root->pid, ERTS_LINK_ROOT(root));
- if (ERTS_LINK_ROOT(root) != NULL) {
- ErtsLink *sub = ERTS_LINK_ROOT(root);
- int len = dsbufp->str_len;
- erts_dump_links_aux(sub->right, indent+len+5, dsbufp);
- erts_dsprintf(dsbufp, "-> %*s[%b16d:%b16u:%T:%p]", indent, "",
- sub->balance, sub->type, sub->pid, ERTS_LINK_ROOT(sub));
- erts_printf("%s\n", dsbufp->str);
- erts_dump_links_aux(sub->left, indent+len+5, dsbufp);
- } else {
- erts_printf("%s\n", dsbufp->str);
- }
- erts_dump_links_aux(root->left, indent+2, dsbufp);
-}
-
-static void erts_dump_links(ErtsLink *root, int indent)
-{
- erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
- erts_dump_links_aux(root, indent, dsbufp);
- erts_destroy_tmp_dsbuf(dsbufp);
-}
-
-Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
-{
- Process *p = BIF_P;
- Eterm pid = BIF_ARG_1;
- Process *rp;
- DistEntry *dep;
- rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
- if (is_atom(pid) && is_node_name_atom(pid) &&
- (dep = erts_find_dist_entry(pid)) != NULL) {
- erts_printf("Dumping dist monitors-------------------\n");
- erts_smp_de_links_lock(dep);
- erts_dump_monitors(dep->monitors,0);
- erts_smp_de_links_unlock(dep);
- erts_printf("Monitors dumped-------------------------\n");
- erts_deref_dist_entry(dep);
- BIF_RET(am_true);
- } else {
- BIF_ERROR(p,BADARG);
- }
- } else {
- erts_printf("Dumping pid monitors--------------------\n");
- erts_dump_monitors(ERTS_P_MONITORS(rp),0);
- erts_printf("Monitors dumped-------------------------\n");
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- BIF_RET(am_true);
- }
-}
-
-Eterm erts_debug_dump_links_1(BIF_ALIST_1)
-{
- Process *p = BIF_P;
- Eterm pid = BIF_ARG_1;
- Process *rp;
- DistEntry *dep;
- if (is_internal_port(pid)) {
- Port *rport = erts_id2port_sflgs(pid,
- p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_LOOKUP);
- if (rport) {
- erts_printf("Dumping port links----------------------\n");
- erts_dump_links(ERTS_P_LINKS(rport), 0);
- erts_printf("Links dumped----------------------------\n");
- erts_port_release(rport);
- BIF_RET(am_true);
- } else {
- BIF_ERROR(p,BADARG);
- }
- } else {
- rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
- if (is_atom(pid) && is_node_name_atom(pid) &&
- (dep = erts_find_dist_entry(pid)) != NULL) {
- erts_printf("Dumping dist links----------------------\n");
- erts_smp_de_links_lock(dep);
- erts_dump_links(dep->nlinks,0);
- erts_smp_de_links_unlock(dep);
- erts_printf("Links dumped----------------------------\n");
- erts_deref_dist_entry(dep);
- BIF_RET(am_true);
- } else {
- BIF_ERROR(p,BADARG);
- }
-
- } else {
- erts_printf("Dumping pid links-----------------------\n");
- erts_dump_links(ERTS_P_LINKS(rp), 0);
- erts_printf("Links dumped----------------------------\n");
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- BIF_RET(am_true);
- }
- }
-}
-
-void erts_one_link_size(ErtsLink *lnk, void *vpu)
-{
- Uint *pu = vpu;
- *pu += ERTS_LINK_SIZE*sizeof(Uint);
- if(!IS_CONST(lnk->pid))
- *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
- if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&erts_one_link_size,vpu);
- }
-}
-void erts_one_mon_size(ErtsMonitor *mon, void *vpu)
-{
- Uint *pu = vpu;
- *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
- if(!IS_CONST(mon->pid))
- *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
- if(!IS_CONST(mon->ref))
- *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
-}
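
erts_one_link_size() and erts_one_mon_size() are doit callbacks meant to be driven by the sweep above; through the erts_doforall_* aliases declared in the header below they accumulate the byte size of a whole tree into a caller-supplied counter. A typical (hypothetical) call:

    Uint total = 0;   /* bytes */
    erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &total);
    erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &total);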
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
deleted file mode 100644
index 9e2beedea3..0000000000
--- a/erts/emulator/beam/erl_monitors.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-/**********************************************************************
- * Header for monitors and links data structures.
- * Monitors are kept in an AVL tree and the data structures for
- * the four different types of monitors are like this:
- **********************************************************************
- * Local monitor by pid/port:
- * (Ref is always same in all involved data structures)
- **********************************************************************
- * Process/Port X Process Y
- * +-------------+ +-------------+
- * Type: | MON_ORIGIN | | MON_TARGET |
- * +-------------+ +-------------+
- * Pid: | Pid(Y) | | Pid/Port(X) |
- * +-------------+ +-------------+
- * Name: | [] | | [] |
- * +-------------+ +-------------+
- **********************************************************************
- * Local monitor by name: (Ref is always same in all involved data structures)
- **********************************************************************
- * Process X Process Y (name foo)
- * +-------------+ +-------------+
- * Type: | MON_ORIGIN | | MON_TARGET |
- * +-------------+ +-------------+
- * Pid: | Pid(Y) | | Pid(X) |
- * +-------------+ +-------------+
- * Name: | Atom(foo) | | Atom(foo) |
- * +-------------+ +-------------+
- **********************************************************************
- * Remote monitor by pid: (Ref is always same in all involved data structures)
- **********************************************************************
- * Node A | Node B
- * ---------------------------------+----------------------------------
- * Process X (@A) Distentry @A Distentry @B Process Y (@B)
- * for node B for node A
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Type: | MON_ORIGIN | | MON_TARGET | | MON_ORIGIN | | MON_TARGET |
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Pid: | Pid(Y) | | Pid(X) | | Pid(Y) | | Pid(X) |
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Name: | [] | | [] | | [] | | [] |
- * +-------------+ +-------------+ +-------------+ +-------------+
- **********************************************************************
- * Remote monitor by name: (Ref is always same in all involved data structures)
- **********************************************************************
- * Node A | Node B
- * ---------------------------------+----------------------------------
- * Process X (@A) Distentry @A Distentry @B Process Y (@B)
- * for node B for node A (name foo)
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Type: | MON_ORIGIN | | MON_TARGET | | MON_ORIGIN | | MON_TARGET |
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Pid: | Atom(node B)| | Pid(X) | | Pid(Y) | | Pid(X) |
- * +-------------+ +-------------+ +-------------+ +-------------+
- * Name: | Atom(foo) | | Atom(foo) | | Atom(foo) | | Atom(foo) |
- * +-------------+ +-------------+ +-------------+ +-------------+
- * The reason for the node atom in X->pid is that we don't know the actual
- * pid of the monitored process on the other node when setting the monitor
- * (which is done asynchronously).
- **********************************************************************/
-#ifndef _ERL_MONITORS_H
-#define _ERL_MONITORS_H
-
-/* Type tags for monitors */
-#define MON_ORIGIN 1
-#define MON_TARGET 3
-#define MON_TIME_OFFSET 7
-
-/* Type tags for links */
-#define LINK_PID 1 /* ...Or port */
-#define LINK_NODE 3 /* "Node monitor" */
-
-/* Size of a monitor without heap, in words (fixalloc) */
-#define ERTS_MONITOR_SIZE ((sizeof(ErtsMonitor) - sizeof(Uint))/sizeof(Uint))
-#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + REF_THING_SIZE)
-#define ERTS_LINK_SIZE ((sizeof(ErtsLink) - sizeof(Uint))/sizeof(Uint))
-#define ERTS_LINK_SH_SIZE ERTS_LINK_SIZE /* Size of fix-alloced links */
-
-/* ErtsMonitor and ErtsLink *need* to begin in a similar way as
- ErtsMonitorOrLink */
-typedef struct erts_monitor_or_link {
- struct erts_monitor_or_link *left, *right;
- Sint16 balance;
-} ErtsMonitorOrLink;
-
-typedef struct erts_monitor {
- struct erts_monitor *left, *right;
- Sint16 balance;
- Uint16 type; /* MON_ORIGIN | MON_TARGET | MON_TIME_OFFSET */
- Eterm ref;
-    Eterm pid;	 /* In the case of a distributed named monitor, this is
-		    the node name atom in the MON_ORIGIN process; otherwise
-		    a pid or, in the case of a MON_TARGET, possibly a port */
- Eterm name; /* When monitoring a named process: atom() else [] */
- Uint heap[1]; /* Larger in reality */
-} ErtsMonitor;
-
-typedef struct erts_link {
- struct erts_link *left, *right;
- Sint16 balance;
- Uint16 type; /* LINK_PID | LINK_NODE */
- Eterm pid; /* When node monitor,
- the node atom is here instead */
- union {
- struct erts_link *root; /* Used only in dist entries */
- Uint refc;
- } shared;
- Uint heap[1]; /* Larger in reality */
-} ErtsLink;
-
-typedef struct erts_suspend_monitor {
- struct erts_suspend_monitor *left, *right;
- Sint16 balance;
-
- int pending;
- int active;
- Eterm pid;
-} ErtsSuspendMonitor;
-
-#define ERTS_LINK_ROOT(Linkp) ((Linkp)->shared.root)
-#define ERTS_LINK_REFC(Linkp) ((Linkp)->shared.refc)
-
-Uint erts_tot_link_lh_size(void);
-
-
-/* Prototypes */
-void erts_destroy_monitor(ErtsMonitor *mon);
-void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
- Eterm name);
-ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref);
-ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref);
-void erts_sweep_monitors(ErtsMonitor *root,
- void (*doit)(ErtsMonitor *, void *),
- void *context);
-
-void erts_destroy_link(ErtsLink *lnk);
-/* Returns 0 if OK, < 0 if already present */
-int erts_add_link(ErtsLink **root, Uint type, Eterm pid);
-ErtsLink *erts_add_or_lookup_link(ErtsLink **root, Uint type, Eterm pid);
-ErtsLink *erts_remove_link(ErtsLink **root, Eterm pid);
-ErtsLink *erts_lookup_link(ErtsLink *root, Eterm pid);
-void erts_sweep_links(ErtsLink *root,
- void (*doit)(ErtsLink *, void *),
- void *context);
-
-void erts_destroy_suspend_monitor(ErtsSuspendMonitor *sproc);
-void erts_sweep_suspend_monitors(ErtsSuspendMonitor *root,
- void (*doit)(ErtsSuspendMonitor *, void *),
- void *context);
-ErtsSuspendMonitor *erts_add_or_lookup_suspend_monitor(ErtsSuspendMonitor **root,
- Eterm pid);
-ErtsSuspendMonitor *erts_lookup_suspend_monitor(ErtsSuspendMonitor *root,
- Eterm pid);
-void erts_delete_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid);
-void erts_init_monitors(void);
-void erts_one_link_size(ErtsLink *lnk, void *vpu);
-void erts_one_mon_size(ErtsMonitor *mon, void *vpu);
-
-#define erts_doforall_monitors erts_sweep_monitors
-#define erts_doforall_links erts_sweep_links
-#define erts_doforall_suspend_monitors erts_sweep_suspend_monitors
-
-#endif /* _ERL_MONITORS_H */
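
The diagrams at the top of this header correspond to paired erts_add_monitor() calls that share a single reference. For the first diagram (local monitor by pid), the setup would look roughly like this, with x, y, and ref as hypothetical stand-ins:

    /* X monitors Y locally by pid; the same ref keys both trees.
     * NIL plays the role of the 'no name' marker ([] in the diagrams). */
    erts_add_monitor(&ERTS_P_MONITORS(x), MON_ORIGIN, ref, y->common.id, NIL);
    erts_add_monitor(&ERTS_P_MONITORS(y), MON_TARGET, ref, x->common.id, NIL);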
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 421445fbad..375b004b5b 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2015. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,11 +48,7 @@ static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory)
static void erts_msacc_reset(ErtsMsAcc *msacc);
static ErtsMsAcc* get_msacc(void);
-#ifdef USE_THREADS
erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
-#else
-ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL;
-#endif
#ifndef ERTS_MSACC_ALWAYS_ON
int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
#endif
@@ -60,15 +56,13 @@ int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
static Eterm *erts_msacc_state_atoms = NULL;
static erts_rwmtx_t msacc_mutex;
static ErtsMsAcc *msacc_managed = NULL;
-#ifdef USE_THREADS
static ErtsMsAcc *msacc_unmanaged = NULL;
static Uint msacc_unmanaged_count = 0;
-#endif
#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
-#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
#else
-#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + REF_THING_SIZE)
+#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + ERTS_REF_THING_SIZE)
#endif
/* we have to split initialization in two as atoms are not available in early init */
@@ -76,12 +70,9 @@ void erts_msacc_early_init(void) {
#ifndef ERTS_MSACC_ALWAYS_ON
erts_msacc_enabled = 0;
#endif
- erts_rwmtx_init(&msacc_mutex,"msacc_list_mutex");
-#ifdef USE_THREADS
+ erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
-#else
- erts_msacc = NULL;
-#endif
}
void erts_msacc_init(void) {
@@ -90,7 +81,7 @@ void erts_msacc_init(void) {
sizeof(Eterm)*ERTS_MSACC_STATE_COUNT);
for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
erts_msacc_state_atoms[i] = am_atom_put(erts_msacc_states[i],
- strlen(erts_msacc_states[i]));
+ sys_strlen(erts_msacc_states[i]));
}
}
@@ -106,10 +97,10 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
msacc->tid = erts_thr_self();
msacc->perf_counter = 0;
-#ifdef USE_THREADS
erts_rwmtx_rwlock(&msacc_mutex);
if (!managed) {
- erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex");
+ erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
msacc->next = msacc_unmanaged;
msacc_unmanaged = msacc;
msacc_unmanaged_count++;
@@ -119,9 +110,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
msacc_managed = msacc;
}
erts_rwmtx_rwunlock(&msacc_mutex);
-#else
- msacc_managed = msacc;
-#endif
erts_msacc_reset(msacc);
@@ -137,8 +125,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
void erts_msacc_set_bif_state(ErtsMsAcc *__erts_msacc_cache, Eterm mod, void *fn) {
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
- if (fn == &FuncAddr) { \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
+ if (fn == &BifFuncAddr) { \
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATIC_STATE_COUNT + Num); \
} else
#include "erl_bif_list.h"
@@ -203,7 +191,7 @@ Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory) {
map->keys = key;
hp[0] = state_map;
hp[1] = msacc->id;
- hp[2] = am_atom_put(msacc->type,strlen(msacc->type));
+ hp[2] = am_atom_put(msacc->type,sys_strlen(msacc->type));
return make_flatmap(map);
}
@@ -212,9 +200,9 @@ typedef struct {
int action;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsMSAccReq;
static ErtsMsAcc* get_msacc(void) {
@@ -245,7 +233,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
- hp = erts_produce_heap(&factory, REF_THING_SIZE + 3 /* tuple */, 0);
+ hp = erts_produce_heap(&factory, ERTS_REF_THING_SIZE + 3 /* tuple */, 0);
ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
msg = erts_msacc_gather_stats(msacc, &factory);
msg = TUPLE2(hp, ref_copy, msg);
@@ -255,7 +243,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
erts_factory_close(&factory);
} else {
ErlOffHeap *ohp = NULL;
- msgp = erts_alloc_message_heap(rp, &rp_locks, REF_THING_SIZE, &hp, &ohp);
+ msgp = erts_alloc_message_heap(rp, &rp_locks, ERTS_REF_THING_SIZE, &hp, &ohp);
msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
}
@@ -265,7 +253,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
@@ -301,7 +289,7 @@ reply_msacc(void *vmsaccrp)
erts_proc_dec_refc(msaccrp->proc);
- if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&msaccrp->refc) == 0)
erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
}
@@ -368,14 +356,10 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
msaccrp->ref = STORE_NC(&hp, NULL, ref);
msaccrp->req_sched = esdp->no;
-#ifdef ERTS_SMP
*threads = erts_no_schedulers;
*threads += 1; /* aux thread */
-#else
- *threads = 1;
-#endif
- erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
+ erts_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
erts_proc_add_refc(c_p, *threads);
@@ -384,12 +368,9 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
erts_no_schedulers,
reply_msacc,
(void *) msaccrp);
-#ifdef ERTS_SMP
/* aux thread */
erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
-#endif
-#ifdef USE_THREADS
/* Manage unmanaged threads */
switch (action) {
case ERTS_MSACC_GATHER: {
@@ -466,7 +447,6 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
default: { ASSERT(0); }
}
-#endif
*threads = make_small(*threads);
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index ad7c8c5eee..abea18b340 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2014-2015. All Rights Reserved.
+ * Copyright Ericsson AB 2014-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
#define ERL_MSACC_H__
/* Can be enabled/disabled via configure */
-#if ERTS_ENABLE_MSACC == 2
+#if defined(ERTS_ENABLE_MSACC) && ERTS_ENABLE_MSACC == 2
#define ERTS_MSACC_EXTENDED_STATES 1
#endif
@@ -66,7 +66,7 @@
#define ERTS_MSACC_STATE_COUNT 7
-#if ERTS_MSACC_STATE_STRINGS && ERTS_ENABLE_MSACC
+#if defined(ERTS_MSACC_STATE_STRINGS) && defined(ERTS_ENABLE_MSACC)
static char *erts_msacc_states[] = {
"aux",
"check_io",
@@ -104,7 +104,7 @@ static char *erts_msacc_states[] = {
#define ERTS_MSACC_STATE_COUNT ERTS_MSACC_STATIC_STATE_COUNT
#endif
-#if ERTS_MSACC_STATE_STRINGS
+#ifdef ERTS_MSACC_STATE_STRINGS
static char *erts_msacc_states[] = {
"alloc",
"aux",
@@ -122,7 +122,7 @@ static char *erts_msacc_states[] = {
"sleep",
"timers"
#ifdef ERTS_MSACC_EXTENDED_BIFS
-#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \
+#define BIF_LIST(Mod,Func,Arity,BifFuncAddr,FuncAddr,Num) \
,"bif_" #Mod "_" #Func "_" #Arity
#include "erl_bif_list.h"
#undef BIF_LIST
@@ -157,27 +157,18 @@ struct erl_msacc_t_ {
};
-#if ERTS_ENABLE_MSACC
+#ifdef ERTS_ENABLE_MSACC
-#ifdef USE_THREADS
-extern erts_tsd_key_t erts_msacc_key;
-#else
-extern ErtsMsAcc *erts_msacc;
-#endif
+extern erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
#ifdef ERTS_MSACC_ALWAYS_ON
#define erts_msacc_enabled 1
#else
-extern int erts_msacc_enabled;
+extern int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
#endif
-#ifdef USE_THREADS
#define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key)
#define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd)
-#else
-#define ERTS_MSACC_TSD_GET() erts_msacc
-#define ERTS_MSACC_TSD_SET(tsd) erts_msacc = tsd
-#endif
void erts_msacc_early_init(void);
void erts_msacc_init(void);
@@ -279,18 +270,32 @@ void erts_msacc_init_thread(char *type, int id, int liberty);
#define ERTS_MSACC_PUSH_STATE_M() \
ERTS_MSACC_DECLARE_CACHE(); \
ERTS_MSACC_PUSH_STATE_CACHED_M()
-#define ERTS_MSACC_PUSH_STATE_CACHED_M() \
- __erts_msacc_state = ERTS_MSACC_IS_ENABLED_CACHED() ? \
- erts_msacc_get_state_m__(__erts_msacc_cache) : ERTS_MSACC_STATE_OTHER
+#define ERTS_MSACC_PUSH_STATE_CACHED_M() \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ __erts_msacc_state = erts_msacc_get_state_m__(__erts_msacc_cache); \
+ } else { \
+ __erts_msacc_state = ERTS_MSACC_STATE_OTHER; \
+ } \
+ } while(0)
#define ERTS_MSACC_SET_STATE_M(state) \
ERTS_MSACC_DECLARE_CACHE(); \
ERTS_MSACC_SET_STATE_CACHED_M(state)
-#define ERTS_MSACC_SET_STATE_CACHED_M(state) \
- if (ERTS_MSACC_IS_ENABLED_CACHED()) \
- erts_msacc_set_state_m__(__erts_msacc_cache, state, 1)
-#define ERTS_MSACC_POP_STATE_M() \
- if (ERTS_MSACC_IS_ENABLED_CACHED()) \
- erts_msacc_set_state_m__(__erts_msacc_cache, __erts_msacc_state, 0)
+#define ERTS_MSACC_SET_STATE_CACHED_M(state) \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ erts_msacc_set_state_m__(__erts_msacc_cache, state, 1); \
+ } \
+ } while(0)
+#define ERTS_MSACC_POP_STATE_M() \
+ do { \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) { \
+ ASSERT(!__erts_msacc_cache->unmanaged); \
+ erts_msacc_set_state_m__(__erts_msacc_cache, __erts_msacc_state, 0); \
+ } \
+ } while(0)
#define ERTS_MSACC_PUSH_AND_SET_STATE_M(state) \
ERTS_MSACC_PUSH_STATE_M(); ERTS_MSACC_SET_STATE_CACHED_M(state)
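
Rewriting these multi-statement macros as do { ... } while(0) blocks is the standard C hygiene idiom: the expansion becomes a single statement, so a trailing semicolon and surrounding if/else behave as the caller expects. A generic sketch of the hazard the old bare-if form had (check, act, cond, and fallback are illustrative names):

    #define BAD(x)  if (check(x)) act(x)                 /* unhygienic */
    #define GOOD(x) do { if (check(x)) act(x); } while (0)

    if (cond)
        BAD(1);      /* the following 'else' silently binds to BAD's
                      * internal 'if', not to 'if (cond)' */
    else
        fallback();  /* with GOOD(1); the pairing is correct */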
@@ -327,8 +332,8 @@ ERTS_GLB_INLINE
void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) {
if (ERTS_UNLIKELY(msacc->unmanaged)) {
erts_mtx_lock(&msacc->mtx);
- msacc->state = new_state;
if (ERTS_LIKELY(!msacc->perf_counter)) {
+ msacc->state = new_state;
erts_mtx_unlock(&msacc->mtx);
return;
}
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index e275867928..6e0a0dcff7 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -314,7 +314,7 @@ disable_trace(int error, char *reason, int eno)
erts_fprintf(stderr, "%s: %s\n", mt_dis, reason);
else {
eno_str = erl_errno_id(eno);
- if (strcmp(eno_str, "unknown") == 0)
+ if (sys_strcmp(eno_str, "unknown") == 0)
erts_fprintf(stderr, "%s: %s: %d\n", mt_dis, reason, eno);
else
erts_fprintf(stderr, "%s: %s: %s\n", mt_dis, reason, eno_str);
@@ -401,9 +401,9 @@ write_trace_header(char *nodename, char *pid, char *hostname)
PUT_UI32(tracep, ERTS_MT_MAJOR_VSN);
PUT_UI32(tracep, ERTS_MT_MINOR_VSN);
- n_len = strlen(nodename);
- h_len = strlen(hostname);
- p_len = strlen(pid);
+ n_len = sys_strlen(nodename);
+ h_len = sys_strlen(hostname);
+ p_len = sys_strlen(pid);
hdr_prolog_len = (2*UI32_SZ
+ 3*UI16_SZ
+ 3*UI32_SZ
@@ -436,15 +436,15 @@ write_trace_header(char *nodename, char *pid, char *hostname)
PUT_UI32(tracep, start_time.usec);
PUT_UI8(tracep, (byte) n_len);
- memcpy((void *) tracep, (void *) nodename, n_len);
+ sys_memcpy((void *) tracep, (void *) nodename, n_len);
tracep += n_len;
PUT_UI8(tracep, (byte) h_len);
- memcpy((void *) tracep, (void *) hostname, h_len);
+ sys_memcpy((void *) tracep, (void *) hostname, h_len);
tracep += h_len;
PUT_UI8(tracep, (byte) p_len);
- memcpy((void *) tracep, (void *) pid, p_len);
+ sys_memcpy((void *) tracep, (void *) pid, p_len);
tracep += p_len;
ASSERT(startp + hdr_prolog_len == tracep);
@@ -472,7 +472,7 @@ write_trace_header(char *nodename, char *pid, char *hostname)
str = ERTS_ALC_A2AD(i);
ASSERT(str);
- str_len = strlen(str);
+ str_len = sys_strlen(str);
if (str_len >= (1 << 8)) {
disable_trace(1, "Excessively large allocator string", 0);
return 0;
@@ -493,7 +493,7 @@ write_trace_header(char *nodename, char *pid, char *hostname)
PUT_UI16(tracep, aflags);
PUT_UI16(tracep, (Uint16) i);
PUT_UI8( tracep, (byte) str_len);
- memcpy((void *) tracep, (void *) str, str_len);
+ sys_memcpy((void *) tracep, (void *) str, str_len);
tracep += str_len;
if (erts_allctrs_info[i].alloc_util) {
PUT_UI8(tracep, 2);
@@ -519,7 +519,7 @@ write_trace_header(char *nodename, char *pid, char *hostname)
str = ERTS_ALC_N2TD(i);
ASSERT(str);
- str_len = strlen(str);
+ str_len = sys_strlen(str);
if (str_len >= (1 << 8)) {
disable_trace(1, "Excessively large type string", 0);
return 0;
@@ -543,7 +543,7 @@ write_trace_header(char *nodename, char *pid, char *hostname)
PUT_UI16(tracep, nflags);
PUT_UI16(tracep, (Uint16) i);
PUT_UI8(tracep, (byte) str_len);
- memcpy((void *) tracep, (void *) str, str_len);
+ sys_memcpy((void *) tracep, (void *) str, str_len);
tracep += str_len;
PUT_UI16(tracep, no);
ASSERT(startp + entry_sz == tracep);
@@ -572,7 +572,7 @@ void erts_mtrace_pre_init(void)
void erts_mtrace_init(char *receiver, char *nodename)
{
- char hostname[MAXHOSTNAMELEN];
+ char hostname[MAXHOSTNAMELEN + 1];
char pid[21]; /* enough for a 64 bit number */
socket_desc = ERTS_SOCK_INVALID_SOCKET;
@@ -583,8 +583,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
byte ip_addr[4];
Uint16 port;
- erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
@@ -613,9 +615,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
}
tracep = trace_buffer;
endp = trace_buffer + TRACE_BUF_SZ;
- if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
+    /* gethostname requires that len be at least the maximum hostname length + 1 */
+ if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN + 1) != 0)
hostname[0] = '\0';
- hostname[MAXHOSTNAMELEN-1] = '\0';
+ hostname[MAXHOSTNAMELEN] = '\0';
sys_get_pid(pid, sizeof(pid));
write_trace_header(nodename ? nodename : "", pid, hostname);
erts_mtrace_update_heap_size();
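
The hunk above fixes an off-by-one: gethostname-style APIs need room for the terminating NUL, so the buffer grows to MAXHOSTNAMELEN + 1 and the terminator lands at index MAXHOSTNAMELEN. The same pattern against plain POSIX gethostname (note that HOST_NAME_MAX availability varies by platform):

    #include <limits.h>
    #include <unistd.h>

    char host[HOST_NAME_MAX + 1];        /* +1 for the terminating NUL */
    if (gethostname(host, sizeof(host)) != 0)
        host[0] = '\0';                  /* fall back to an empty name */
    host[HOST_NAME_MAX] = '\0';          /* guard against truncation */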
diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
new file mode 100644
index 0000000000..b8cf2bee0e
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -0,0 +1,180 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016-2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+
+#include "global.h"
+#include "erl_process.h"
+#include "bif.h"
+#include "erl_nfunc_sched.h"
+#include "erl_trace.h"
+
+NifExport *
+erts_new_proc_nif_export(Process *c_p, int argc)
+{
+ size_t size;
+ int i;
+ NifExport *nep, *old_nep;
+
+ size = sizeof(NifExport) + (argc-1)*sizeof(Eterm);
+ nep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, size);
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++)
+ nep->exp.addressv[i] = &nep->exp.beam[0];
+
+ nep->argc = -1; /* unused marker */
+ nep->argv_size = argc;
+ nep->trace = NULL;
+ old_nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(c_p, nep);
+ if (old_nep) {
+ ASSERT(!nep->trace);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, old_nep);
+ }
+ return nep;
+}
+
+void
+erts_destroy_nif_export(Process *p)
+{
+ NifExport *nep = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nep) {
+ if (nep->m)
+ erts_nif_export_cleanup_nif_mod(nep);
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, nep);
+ }
+}
+
+void
+erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ NifExportTrace *netp;
+ ASSERT(nep && nep->argc >= 0);
+ ASSERT(!nep->trace);
+ netp = erts_alloc(ERTS_ALC_T_NIF_EXP_TRACE,
+ sizeof(NifExportTrace));
+ netp->applying = applying;
+ netp->ep = ep;
+ netp->cp = cp;
+ netp->flags = flags;
+ netp->flags_meta = flags_meta;
+ netp->I = I;
+ netp->meta_tracer = NIL;
+ erts_tracer_update(&netp->meta_tracer, meta_tracer);
+ nep->trace = netp;
+}
+
+void
+erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep)
+{
+ NifExportTrace *netp = nep->trace;
+ nep->trace = NULL;
+ erts_bif_trace_epilogue(c_p, result, netp->applying, netp->ep,
+ netp->cp, netp->flags, netp->flags_meta,
+ netp->I, netp->meta_tracer);
+ erts_tracer_update(&netp->meta_tracer, NIL);
+ erts_free(ERTS_ALC_T_NIF_EXP_TRACE, netp);
+}
+
+NifExport *
+erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv)
+{
+ Process *used_proc;
+ ErtsSchedulerData *esdp;
+ Eterm* reg;
+ NifExport* nep;
+ int i;
+
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ if (dirty_shadow_proc) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp && ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = dirty_shadow_proc;
+ }
+ else {
+ esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ used_proc = c_p;
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+
+ reg = esdp->x_reg_array;
+
+ if (mfa)
+ nep = erts_get_proc_nif_export(c_p, (int) mfa->arity);
+ else {
+ /* If no mfa, this is not the first schedule... */
+ nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ ASSERT(nep && nep->argc >= 0);
+ }
+
+ if (nep->argc < 0) {
+ /*
+ * First schedule; save things that might
+ * need to be restored...
+ */
+ for (i = 0; i < (int) mfa->arity; i++)
+ nep->argv[i] = reg[i];
+ nep->pc = pc;
+ nep->cp = c_p->cp;
+ nep->mfa = mfa;
+ nep->current = c_p->current;
+ ASSERT(argc >= 0);
+ nep->argc = (int) mfa->arity;
+ nep->m = NULL;
+
+ ASSERT(!erts_check_nif_export_in_area(c_p,
+ (char *) nep,
+ (sizeof(NifExport)
+ + (sizeof(Eterm)
+ *(nep->argc-1)))));
+ }
+ /* Copy new arguments into register array if necessary... */
+ if (reg != argv) {
+ for (i = 0; i < argc; i++)
+ reg[i] = argv[i];
+ }
+ ASSERT(is_atom(mod) && is_atom(func));
+ nep->exp.info.mfa.module = mod;
+ nep->exp.info.mfa.function = func;
+ nep->exp.info.mfa.arity = (Uint) argc;
+ nep->exp.beam[0] = (BeamInstr) instr; /* call_nif || apply_bif */
+ nep->exp.beam[1] = (BeamInstr) dfunc;
+ nep->func = ifunc;
+ used_proc->arity = argc;
+ used_proc->freason = TRAP;
+ used_proc->i = (BeamInstr*) nep->exp.addressv[0];
+ return nep;
+}
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
new file mode 100644
index 0000000000..1cb252eba5
--- /dev/null
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -0,0 +1,328 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2016-2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NFUNC_SCHED_H__
+#define ERL_NFUNC_SCHED_H__
+
+#include "erl_process.h"
+#include "bif.h"
+#include "error.h"
+
+typedef struct {
+ int applying;
+ Export* ep;
+ BeamInstr *cp;
+ Uint32 flags;
+ Uint32 flags_meta;
+ BeamInstr* I;
+ ErtsTracer meta_tracer;
+} NifExportTrace;
+
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. A number of values are stored for error handling purposes
+ * only.
+ *
+ * 'argc' is >= 0 when NifExport is in use, and < 0 when not.
+ */
+
+typedef struct {
+ Export exp;
+ struct erl_module_nif* m; /* NIF module, or NULL if BIF */
+ void *func; /* Indirect NIF or BIF to execute (may be unused) */
+ ErtsCodeMFA *current;/* Current as set when originally called */
+ NifExportTrace *trace;
+ /* --- The following is only used on error --- */
+ BeamInstr *pc; /* Program counter */
+ BeamInstr *cp; /* Continuation pointer */
+ ErtsCodeMFA *mfa; /* MFA of original call */
+ int argc; /* Number of arguments in original call */
+ int argv_size; /* Allocated size of argv */
+ Eterm argv[1]; /* Saved arguments from the original call */
+} NifExport;
+
+NifExport *erts_new_proc_nif_export(Process *c_p, int argc);
+void erts_nif_export_save_trace(Process *c_p, NifExport *nep, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+void erts_nif_export_restore_trace(Process *c_p, Eterm result, NifExport *nep);
+void erts_destroy_nif_export(Process *p);
+NifExport *erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
+ ErtsCodeMFA *mfa, BeamInstr *pc,
+ BeamInstr instr,
+ void *dfunc, void *ifunc,
+ Eterm mod, Eterm func,
+ int argc, const Eterm *argv);
+void erts_nif_export_cleanup_nif_mod(NifExport *ep); /* erl_nif.c */
+ERTS_GLB_INLINE NifExport *erts_get_proc_nif_export(Process *c_p, int extra);
+ERTS_GLB_INLINE int erts_setup_nif_export_rootset(Process* proc, Eterm** objv,
+ Uint* nobj);
+ERTS_GLB_INLINE int erts_check_nif_export_in_area(Process *p,
+ char *start, Uint size);
+ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep,
+ Eterm result);
+ERTS_GLB_INLINE void erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa);
+ERTS_GLB_INLINE int erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
+ERTS_GLB_INLINE Process *erts_proc_shadow2real(Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE NifExport *
+erts_get_proc_nif_export(Process *c_p, int argc)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (!nep || (nep->argc < 0 && nep->argv_size < argc))
+ return erts_new_proc_nif_export(c_p, argc);
+ return nep;
+}
+
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. Any exception term saved in the NifExport is also made
+ * part of the GC rootset here; it always resides in rootset[0].
+ */
+ERTS_GLB_INLINE int
+erts_setup_nif_export_rootset(Process* proc, Eterm** objv, Uint* nobj)
+{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+
+ if (!ep || ep->argc <= 0)
+ return 0;
+
+ *objv = ep->argv;
+ *nobj = ep->argc;
+ return 1;
+}
+
+/*
+ * Check if nif export points into code area...
+ */
+ERTS_GLB_INLINE int
+erts_check_nif_export_in_area(Process *p, char *start, Uint size)
+{
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(p);
+ if (!nep || nep->argc < 0)
+ return 0;
+ if (ErtsInArea(nep->pc, start, size))
+ return 1;
+ if (ErtsInArea(nep->cp, start, size))
+ return 1;
+ if (ErtsInArea(nep->mfa, start, size))
+ return 1;
+ if (ErtsInArea(nep->current, start, size))
+ return 1;
+ return 0;
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
+{
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ERTS_LC_ASSERT(!(c_p->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ c_p->current = ep->current;
+ ep->argc = -1; /* Unused nif-export marker... */
+ if (ep->trace)
+ erts_nif_export_restore_trace(c_p, result, ep);
+}
+
+ERTS_GLB_INLINE void
+erts_nif_export_restore_error(Process* c_p, BeamInstr **pc,
+ Eterm *reg, ErtsCodeMFA **nif_mfa)
+{
+ NifExport *nep = (NifExport *) ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ int ix;
+
+ ASSERT(nep);
+ *pc = nep->pc;
+ c_p->cp = nep->cp;
+ *nif_mfa = nep->mfa;
+ for (ix = 0; ix < nep->argc; ix++)
+ reg[ix] = nep->argv[ix];
+ erts_nif_export_restore(c_p, nep, THE_NON_VALUE);
+}
+
+ERTS_GLB_INLINE int
+erts_nif_export_check_save_trace(Process *c_p, Eterm result,
+ int applying, Export* ep,
+ BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer)
+{
+ if (is_non_value(result) && c_p->freason == TRAP) {
+ NifExport *nep = ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p);
+ if (nep && nep->argc >= 0) {
+ erts_nif_export_save_trace(c_p, nep, applying, ep,
+ cp, flags, flags_meta,
+ I, meta_tracer);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ERTS_GLB_INLINE Process *
+erts_proc_shadow2real(Process *c_p)
+{
+ if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ Process *real_c_p = c_p->next;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ ASSERT(real_c_p->common.id == c_p->common.id);
+ return real_c_p;
+ }
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
+ return c_p;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_NFUNC_SCHED_H__ */
+
+#if defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__)
+#define ERTS_NFUNC_SCHED_INTERNALS__
+
+#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I) \
+ (ASSERT(BeamIsOpCode(*(I), op_apply_bif) || \
+ BeamIsOpCode(*(I), op_call_nif)), \
+ ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))
+
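+
+/* ERTS_I_BEAM_OP_TO_NIF_EXPORT above recovers the enclosing NifExport from a
+ * pointer to its embedded exp.beam[0] slot; this is the classic container_of
+ * idiom. In its generic form (a sketch, not part of this patch):
+ *
+ *     #include <stddef.h>
+ *
+ *     #define CONTAINER_OF(ptr, type, member) \
+ *         ((type *)((char *)(ptr) - offsetof(type, member)))
+ *
+ *     // e.g.: NifExport *nep = CONTAINER_OF(I, NifExport, exp.beam[0]);
+ */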
+
+#include "erl_message.h"
+#include <stddef.h>
+
+ERTS_GLB_INLINE void erts_flush_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE void erts_cache_dirty_shadow_proc(Process *sproc);
+ERTS_GLB_INLINE Process *erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp,
+ Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_flush_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(c_p->stop == sproc->stop);
+ ASSERT(c_p->hend == sproc->hend);
+ ASSERT(c_p->heap == sproc->heap);
+ ASSERT(c_p->abandoned_heap == sproc->abandoned_heap);
+ ASSERT(c_p->heap_sz == sproc->heap_sz);
+ ASSERT(c_p->high_water == sproc->high_water);
+ ASSERT(c_p->old_heap == sproc->old_heap);
+ ASSERT(c_p->old_htop == sproc->old_htop);
+ ASSERT(c_p->old_hend == sproc->old_hend);
+
+ ASSERT(c_p->htop <= sproc->htop && sproc->htop <= c_p->stop);
+
+ c_p->htop = sproc->htop;
+
+ if (!c_p->mbuf)
+ c_p->mbuf = sproc->mbuf;
+ else if (sproc->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = sproc->mbuf; bp->next; bp = bp->next)
+ ASSERT(!bp->off_heap.first);
+ bp->next = c_p->mbuf;
+ c_p->mbuf = sproc->mbuf;
+ }
+
+ c_p->mbuf_sz += sproc->mbuf_sz;
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = sproc->off_heap.first;
+ else if (sproc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = sproc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = sproc->off_heap.first;
+ }
+
+ c_p->off_heap.overhead += sproc->off_heap.overhead;
+}
+
+ERTS_GLB_INLINE void
+erts_cache_dirty_shadow_proc(Process *sproc)
+{
+ Process *c_p = sproc->next;
+ ASSERT(c_p);
+ ASSERT(sproc->common.id == c_p->common.id);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ sproc->htop = c_p->htop;
+ sproc->stop = c_p->stop;
+ sproc->hend = c_p->hend;
+ sproc->heap = c_p->heap;
+ sproc->abandoned_heap = c_p->abandoned_heap;
+ sproc->heap_sz = c_p->heap_sz;
+ sproc->high_water = c_p->high_water;
+ sproc->old_hend = c_p->old_hend;
+ sproc->old_htop = c_p->old_htop;
+ sproc->old_heap = c_p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+}
+
+ERTS_GLB_INLINE Process *
+erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
+{
+ Process *sproc;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = c_p;
+ sproc->common.id = c_p->common.id;
+
+ erts_cache_dirty_shadow_proc(sproc);
+
+ return sproc;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */
+
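
erts_call_dirty_nif() in erl_nif.c drives these helpers as a cache/run/flush cycle around the dirty NIF call; condensed from that function, the lifecycle is roughly:

    /* Condensed illustration, not a standalone API. */
    Process *sproc = erts_make_dirty_shadow_proc(esdp, c_p); /* caches heap ptrs */
    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
    result = (*dirty_nif)(&env, arity, argv);  /* NIF builds terms on sproc */
    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
    erts_flush_dirty_shadow_proc(sproc);       /* publish htop, mbufs, off-heap */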
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 3a547982da..ee6e6085b6 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -55,6 +55,12 @@
#include "dtrace-wrapper.h"
#include "erl_process.h"
#include "erl_bif_unique.h"
+#include "erl_utils.h"
+#include "erl_io_queue.h"
+#include "erl_proc_sig_queue.h"
+#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
#if defined(USE_DYNAMIC_TRACE) && (defined(USE_DTRACE) || defined(USE_SYSTEMTAP))
#define HAVE_USE_DTRACE 1
#endif
@@ -62,7 +68,6 @@
#include <limits.h>
#include <stddef.h> /* offsetof */
-
/* Information about a loaded nif library.
* Each successful call to erlang:load_nif will allocate an instance of
* erl_module_nif. Two calls opening the same library will thus have the same
@@ -71,14 +76,19 @@
struct erl_module_nif {
void* priv_data;
void* handle; /* "dlopen" */
- struct enif_entry_t* entry;
+ struct enif_entry_t entry;
erts_refc_t rt_cnt; /* number of resource types */
erts_refc_t rt_dtor_cnt; /* number of resource types with destructors */
Module* mod; /* Can be NULL if orphan with dtor-resources left */
+
+ ErlNifFunc _funcs_copy_[1]; /* only used for old libs */
};
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
#ifdef DEBUG
# define READONLY_CHECK
+# define ERTS_DBG_NIF_NOT_SCHED_MARKER ((void *) (UWord) 1)
#endif
#ifdef READONLY_CHECK
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) add_readonly_check(ENV,PTR,SIZE)
@@ -87,6 +97,14 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
# define ADD_READONLY_CHECK(ENV,PTR,SIZE) ((void)0)
#endif
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE) dbg_assert_in_env(ENV, TERM, NR, TYPE, __func__)
+static void dbg_assert_in_env(ErlNifEnv*, Eterm term, int nr, const char* type, const char* func);
+# include "erl_gc.h"
+#else
+# define ASSERT_IN_ENV(ENV, TERM, NR, TYPE)
+#endif
+
#ifdef DEBUG
static int is_offheap(const ErlOffHeap* off_heap);
#endif
@@ -121,7 +139,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
Process *c_p = env->proc;
if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
& ERTS_PROC_LOCK_MAIN);
}
else {
@@ -194,13 +212,16 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(p->common.id != ERTS_INVALID_PID);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env->dbg_disable_assert_in_env = 0;
+#endif
#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ASSERT(esdp);
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
ASSERT(p->scheduler_data == esdp);
ASSERT((state & (ERTS_PSFLG_RUNNING
@@ -217,41 +238,11 @@ static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
- ErlNifEnv* env, Process* p,
- struct erl_module_nif* mod_nif)
-{
- Process *sproc;
-#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
-
- ASSERT(!p->scheduler_data);
- ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
- && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
- ASSERT(esdp);
-#endif
-
- erts_pre_nif(env, p, mod_nif, NULL);
-
- sproc = esdp->dirty_shadow_process;
- ASSERT(sproc);
- ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
- == (ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_PROXY));
-
- sproc->next = p;
- sproc->common.id = p->common.id;
- env->proc = sproc;
- full_cache_env(env);
-}
-#endif
-
-/* Temporary object header, auto-deallocated when NIF returns
- * or when independent environment is cleared.
- */
+/* Temporary object header, auto-deallocated when NIF returns or when
+ * independent environment is cleared.
+ *
+ * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its
+ * first element must not require greater alignment than `next`. */
struct enif_tmp_obj_t {
struct enif_tmp_obj_t* next;
void (*dtor)(struct enif_tmp_obj_t*);
@@ -268,6 +259,46 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
}
}
+/* Whether the given environment is bound to a process and will be cleaned up
+ * when the NIF returns. It's safe to use temp_alloc for objects in
+ * env->tmp_obj_list when this is true. */
+static ERTS_INLINE int is_proc_bound(ErlNifEnv *env)
+{
+ return env->mod_nif != NULL;
+}
+
+/* Allocates and attaches an object to the given environment, running its
+ * destructor when the environment is cleared. To avoid temporary variables the
+ * address of the allocated object is returned instead of the enif_tmp_obj_t.
+ *
+ * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free
+ * the object. If the destructor needs to refer to the allocated object its
+ * address will be &tmp_obj[1]. */
+static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size,
+ void (*dtor)(struct enif_tmp_obj_t*)) {
+ struct enif_tmp_obj_t *tmp_obj;
+ ErtsAlcType_t allocator;
+
+ allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+
+ tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size));
+
+ tmp_obj->next = env->tmp_obj_list;
+ tmp_obj->allocator = allocator;
+ tmp_obj->dtor = dtor;
+
+ env->tmp_obj_list = tmp_obj;
+
+ return (void*)&tmp_obj[1];
+}
+
+/* Generic destructor for objects allocated through alloc_tmp_obj that don't
+ * care about their payload. */
+static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj)
+{
+ erts_free(tmp_obj->allocator, tmp_obj);
+}
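
A typical (hypothetical) use of this pair is scratch space that disappears with the environment, with no explicit free needed on any return path:

    /* Reclaimed automatically via tmp_alloc_dtor when the NIF returns
     * or the independent environment is cleared. */
    unsigned char *buf = alloc_tmp_obj(env, size, &tmp_alloc_dtor);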
+
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
@@ -276,115 +307,164 @@ void erts_post_nif(ErlNifEnv* env)
env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-void erts_post_dirty_nif(ErlNifEnv* env)
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't enough to hold all of argv,
+ * allocate a larger one. Save 'current' and the argument registers the
+ * first time this call is scheduled.
+ */
+
+static ERTS_INLINE ERL_NIF_TERM
+schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ Eterm mod, Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
- Process *c_p;
- ASSERT(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(env->proc->next);
- erts_unblock_fpe(env->fpe_was_unmasked);
- full_flush_env(env);
- free_tmp_objs(env);
- c_p = env->proc->next;
- env->exiting = ERTS_PROC_IS_EXITING(c_p);
- ERTS_VBUMP_ALL_REDS(c_p);
+ NifExport *ep;
+ Process *c_p, *dirty_shadow_proc;
+
+ execution_state(env, &c_p, NULL);
+ if (c_p == env->proc)
+ dirty_shadow_proc = NULL;
+ else
+ dirty_shadow_proc = env->proc;
+
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+
+ ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
+ c_p->current,
+ c_p->cp,
+ BeamOpCodeAddr(op_call_nif),
+ direct_fp, indirect_fp,
+ mod, func_name,
+ argc, (const Eterm *) argv);
+ if (!ep->m) {
+ /* First time this call is scheduled... */
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep->m = env->mod_nif;
+ }
+ return (ERL_NIF_TERM) THE_NON_VALUE;
}
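
Helpers like this schedule() are the machinery behind enif_schedule_nif(); from a NIF author's point of view the same trap-and-continue mechanism looks like this (a sketch using the documented enif API):

    static ERL_NIF_TERM heavy_work(ErlNifEnv *env, int argc,
                                   const ERL_NIF_TERM argv[]);

    static ERL_NIF_TERM start(ErlNifEnv *env, int argc,
                              const ERL_NIF_TERM argv[])
    {
        /* Trap out now; the VM re-enters via op_call_nif and runs
         * heavy_work on a dirty CPU scheduler. */
        return enif_schedule_nif(env, "heavy_work",
                                 ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                 heavy_work, argc, argv);
    }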
-#endif
-static void full_flush_env(ErlNifEnv* env)
+
+static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+int
+erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *c_p = env->proc->next;
-
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- if (!env->heap_frag) {
- ASSERT(env->hp_end == HEAP_LIMIT(c_p));
- ASSERT(env->hp >= HEAP_TOP(c_p));
- ASSERT(env->hp <= HEAP_LIMIT(c_p));
- HEAP_TOP(c_p) = env->hp;
- }
- else {
- Uint usz;
- ASSERT(env->hp_end != HEAP_LIMIT(c_p));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
-
- HEAP_TOP(c_p) = HEAP_TOP(env->proc);
- usz = env->hp - env->heap_frag->mem;
- env->proc->mbuf_sz += usz - env->heap_frag->used_size;
- env->heap_frag->used_size = usz;
-
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
-
- if (c_p->mbuf) {
- ErlHeapFragment *bp;
- for (bp = env->proc->mbuf; bp->next; bp = bp->next)
- ;
- bp->next = c_p->mbuf;
- }
+ int exiting;
+ ERL_NIF_TERM *argv = (ERL_NIF_TERM *) reg;
+ NifExport *nep = ERTS_I_BEAM_OP_TO_NIF_EXPORT(I);
+ ErtsCodeMFA *codemfa = erts_code_to_codemfa(I);
+ NativeFunPtr dirty_nif = (NativeFunPtr) I[1];
+ ErlNifEnv env;
+ ERL_NIF_TERM result;
+#ifdef DEBUG
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
- c_p->mbuf = env->proc->mbuf;
- c_p->mbuf_sz += env->proc->mbuf_sz;
+ ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
- }
+ ASSERT(!c_p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
- if (!c_p->off_heap.first)
- c_p->off_heap.first = env->proc->off_heap.first;
- else if (env->proc->off_heap.first) {
- struct erl_off_heap_header *ohhp;
- for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
- ;
- ohhp->next = c_p->off_heap.first;
- c_p->off_heap.first = env->proc->off_heap.first;
- }
- c_p->off_heap.overhead += env->proc->off_heap.overhead;
+ nep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
+ erts_pre_nif(&env, c_p, nep->m, NULL);
+
+ env.proc = erts_make_dirty_shadow_proc(esdp, c_p);
+
+ env.proc->freason = EXC_NULL;
+ env.proc->fvalue = NIL;
+ env.proc->ftrace = NIL;
+ env.proc->i = c_p->i;
- return;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC));
+
+ ASSERT(esdp->current_nif == NULL);
+ esdp->current_nif = &env;
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(esdp->current_nif == &env);
+ esdp->current_nif = NULL;
+
+ ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(env.proc->next == c_p);
+
+ exiting = ERTS_PROC_IS_EXITING(c_p);
+
+ if (!exiting) {
+ if (env.exception_thrown) {
+ schedule_exception:
+ schedule(&env, dirty_nif_exception, NULL,
+ am_erts_internal, am_dirty_nif_exception,
+ 1, &env.proc->fvalue);
+ }
+ else if (is_value(result)) {
+ schedule(&env, dirty_nif_finalizer, NULL,
+ am_erts_internal, am_dirty_nif_finalizer,
+ 1, &result);
+ }
+ else if (env.proc->freason != TRAP) { /* user returned garbage... */
+ ERTS_DECL_AM(badreturn);
+ (void) enif_raise_exception(&env, AM_badreturn);
+ goto schedule_exception;
+ }
+ else {
+ /* Rescheduled by dirty NIF call... */
+ ASSERT(nep->func != ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ }
+ c_p->i = env.proc->i;
+ c_p->arity = env.proc->arity;
}
+
+#ifdef DEBUG
+ if (nep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ nep->func = NULL;
#endif
+ erts_unblock_fpe(env.fpe_was_unmasked);
+ full_flush_env(&env);
+ free_tmp_objs(&env);
+
+ return exiting;
+}
+
+
+static void full_flush_env(ErlNifEnv* env)
+{
flush_env(env);
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ /* Dirty nif call using shadow process struct */
+ erts_flush_dirty_shadow_proc(env->proc);
}
static void full_cache_env(ErlNifEnv* env)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- /* Dirty nif call using shadow process struct */
- Process *sproc = env->proc;
- Process *c_p = sproc->next;
- ASSERT(c_p);
- ASSERT(is_scheduler() < 0);
- ASSERT(env->proc->common.id == c_p->common.id);
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
- & ERTS_PROC_LOCK_MAIN);
-
- sproc->htop = c_p->htop;
- sproc->stop = c_p->stop;
- sproc->hend = c_p->hend;
- sproc->heap = c_p->heap;
- sproc->abandoned_heap = c_p->abandoned_heap;
- sproc->heap_sz = c_p->heap_sz;
- sproc->high_water = c_p->high_water;
- sproc->old_hend = c_p->old_hend;
- sproc->old_htop = c_p->old_htop;
- sproc->old_heap = c_p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
-
- env->hp_end = HEAP_LIMIT(c_p);
- env->hp = HEAP_TOP(c_p);
- env->heap_frag = NULL;
- return;
+ erts_cache_dirty_shadow_proc(env->proc);
+	/*
+	 * If the shadow proc had heap fragments when flushed,
+	 * those have now been moved to the real proc. Ensure
+	 * the heap pointers do not point into a heap fragment
+	 * on the real proc...
+	 */
+ ASSERT(!env->proc->mbuf);
+ env->hp_end = HEAP_LIMIT(env->proc);
+ env->hp = HEAP_TOP(env->proc);
}
-#endif
-
cache_env(env);
}
@@ -425,6 +505,7 @@ static void cache_env(ErlNifEnv* env)
env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
}
+
void* enif_priv_data(ErlNifEnv* env)
{
return env->mod_nif->priv_data;
@@ -465,18 +546,22 @@ setup_nif_env(struct enif_msg_environment_t* msg_env,
msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
msg_env->env.exception_thrown = 0;
- memset(&msg_env->phony_proc, 0, sizeof(Process));
+ sys_memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
HEAP_TOP(&msg_env->phony_proc) = phony_heap;
HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
msg_env->phony_proc.common.id = ERTS_INVALID_PID;
+ msg_env->env.tracee = tracee;
+
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
- msg_env->env.tracee = tracee;
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env->env.dbg_disable_assert_in_env = 0;
+#endif
}
ErlNifEnv* enif_alloc_env(void)
@@ -519,6 +604,9 @@ void enif_clear_env(ErlNifEnv* env)
ASSERT(p == menv->env.proc);
ASSERT(p->common.id == ERTS_INVALID_PID);
ASSERT(MBUF(p) == menv->env.heap_frag);
+
+ free_tmp_objs(env);
+
if (MBUF(p) != NULL) {
erts_cleanup_offheap(&MSO(p));
clear_offheap(&MSO(p));
@@ -530,13 +618,11 @@ void enif_clear_env(ErlNifEnv* env)
menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
ASSERT(!is_offheap(&MSO(p)));
- free_tmp_objs(env);
}
-#ifdef ERTS_SMP
#ifdef DEBUG
static int enif_send_delay = 0;
-#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0)
#else
#ifdef ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_FORCE_ENIF_SEND_DELAY() 0
@@ -557,7 +643,11 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
ErlTraceMessageQueue *msgq, **last_msgq;
int reds = 0;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ /* Only one thread at a time is allowed to flush trace messages,
+ so we require the main lock to be held when doing the flush */
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = c_p->trace_msg_q;
@@ -576,7 +666,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
msgq->first = NULL;
msgq->last = &msgq->first;
msgq->len = 0;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
ASSERT(len != 0);
@@ -585,17 +675,17 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
rp_locks = 0;
if (rp->common.id == c_p->common.id)
rp_locks = c_p_locks;
- erts_queue_messages(rp, rp_locks, first, last, len, c_p->common.id);
+ erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
reds += len;
} else {
erts_cleanup_messages(first);
}
reds += 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = msgq->next;
} while (msgq);
@@ -612,36 +702,27 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
}
error:
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
return reds;
}
-#endif
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
ErtsProcLocks lc_locks = 0;
-#endif
Process* rp;
Process* c_p;
ErtsMessage *mp;
+ Eterm from;
Eterm receiver = to_pid->pid;
int scheduler;
execution_state(env, &c_p, &scheduler);
-#ifndef ERTS_SMP
- if (!scheduler) {
- erts_exit(ERTS_ABORT_EXIT,
- "enif_send: called from non-scheduler thread on non-SMP VM");
- return 0;
- }
-#endif
if (scheduler > 0) { /* Normal scheduler */
rp = erts_proc_lookup(receiver);
@@ -655,7 +736,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
return 0;
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
}
@@ -664,7 +745,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ERTS_P2P_FLG_INC_REFC);
if (!rp) {
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
return 0;
}
}
@@ -673,8 +754,58 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
rp_locks = ERTS_PROC_LOCK_MAIN;
if (menv) {
+ Eterm token = c_p ? SEQ_TRACE_TOKEN(c_p) : am_undefined;
+ if (token != NIL && token != am_undefined) {
+ /* This code is copied from erts_send_message */
+ Eterm stoken = SEQ_TRACE_TOKEN(c_p);
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(receiver_name, 64);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Eterm utag = NIL;
+ *sender_name = *receiver_name = '\0';
+ if (DTRACE_ENABLED(message_send)) {
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", c_p->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", rp->common.id);
+ }
+#endif
+ if (have_seqtrace(stoken)) {
+ seq_trace_update_send(c_p);
+ seq_trace_output(stoken, msg, SEQ_TRACE_SEND,
+ rp->common.id, c_p);
+ }
+#ifdef USE_VM_PROBES
+ if (!(DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
+ stoken = NIL;
+ }
+#endif
+ token = enif_make_copy(msg_env, stoken);
+
+#ifdef USE_VM_PROBES
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
+ if (is_immed(DT_UTAG(c_p)))
+ utag = DT_UTAG(c_p);
+ else
+ utag = enif_make_copy(msg_env, DT_UTAG(c_p));
+ }
+ if (DTRACE_ENABLED(message_send)) {
+ if (have_seqtrace(stoken)) {
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
+ }
+ DTRACE6(message_send, sender_name, receiver_name,
+ size_object(msg), tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+ }
flush_env(msg_env);
mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = token;
mp->data.heap_frag = menv->env.heap_frag;
ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
if (mp->data.heap_frag != NULL) {
@@ -687,16 +818,19 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
MBUF(&menv->phony_proc) = NULL;
}
} else {
- Uint sz = size_object(msg);
+ erts_literal_area_t litarea;
ErlOffHeap *ohp;
Eterm *hp;
- if (env && !env->tracee) {
+ Uint sz;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
+ sz = size_object_litopt(msg, &litarea);
+ if (c_p && !env->tracee) {
full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
full_cache_env(env);
}
else {
- erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint_t state = erts_atomic32_read_nob(&rp->state);
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
mp = erts_alloc_message(sz, &hp);
ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
@@ -709,10 +843,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ohp = &bp->off_heap;
}
}
- msg = copy_struct(msg, sz, &hp, ohp);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
- ERL_MESSAGE_TERM(mp) = msg;
+ from = c_p ? c_p->common.id : am_undefined;
if (!env || !env->tracee) {
@@ -722,7 +857,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
}
-#ifdef ERTS_SMP
else {
/* This clause is taken when the nif is called in the context
of a traced process. We do not know which locks we have
@@ -732,8 +866,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlTraceMessageQueue *msgq;
Process *t_p = env->tracee;
-
- erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
msgq = t_p->trace_msg_q;
@@ -750,7 +883,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
rp_locks & ERTS_PROC_LOCK_MSGQ ||
- erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = from;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
if (!msgq) {
msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
@@ -764,36 +901,35 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
msgq->next = t_p->trace_msg_q;
t_p->trace_msg_q = msgq;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
erts_schedule_flush_trace_messages(t_p, 0);
} else {
msgq->len++;
*msgq->last = mp;
msgq->last = &mp->next;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
}
goto done;
} else {
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
rp_locks &= ~ERTS_PROC_LOCK_TRACE;
rp_locks |= ERTS_PROC_LOCK_MSGQ;
}
}
-#endif /* ERTS_SMP */
- erts_queue_message(rp, rp_locks, mp, msg,
- c_p ? c_p->common.id : am_undefined);
+ if (c_p)
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
+ else
+ erts_queue_message(rp, rp_locks, mp, msg, from);
-#ifdef ERTS_SMP
done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
- erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
+ erts_proc_unlock(rp, rp_locks & ~lc_locks);
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (scheduler <= 0)
erts_proc_dec_refc(rp);
@@ -823,15 +959,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
if (scheduler > 0)
prt = erts_port_lookup(to_port->port_id, iflags);
else {
-#ifdef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
prt = erts_thr_port_lookup(to_port->port_id, iflags);
-#else
- erts_exit(ERTS_ABORT_EXIT,
- "enif_port_command: called from non-scheduler "
- "thread on non-SMP VM");
-#endif
}
if (!prt)
@@ -845,6 +975,65 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
return res;
}
+/*
+ * env must be the caller's environment in a scheduler or NULL in a
+ * non-scheduler thread.
+ * name must be an atom - anything else will just waste time.
+ */
+static Eterm call_whereis(ErlNifEnv *env, Eterm name)
+{
+ Process *c_p;
+ Eterm res;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+ ASSERT((c_p && scheduler) || (!c_p && !scheduler));
+
+ if (scheduler < 0) {
+ /* dirty scheduler */
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ c_p = NULL; /* as we don't have main lock */
+ }
+
+
+ if (c_p) {
+ /* main lock may be released below and c_p->htop updated by others */
+ flush_env(env);
+ }
+ res = erts_whereis_name_to_id(c_p, name);
+ if (c_p)
+ cache_env(env);
+
+ return res;
+}
+
+int enif_whereis_pid(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_pid(env, res, pid);
+}
+
+int enif_whereis_port(ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port)
+{
+ Eterm res;
+
+ if (is_not_atom(name))
+ return 0;
+
+ res = call_whereis(env, name);
+ /* enif_get_local_ functions check the type */
+ return enif_get_local_port(env, res, port);
+}
+
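A minimal sketch of how a NIF could use the lookup added above; the registered name "my_logger" and the wrapper function are illustrative assumptions, not part of this patch.

static ERL_NIF_TERM find_logger(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid pid;
    /* Fails (returns 0) when the name is not an atom or nothing is
     * registered under it. */
    if (!enif_whereis_pid(env, enif_make_atom(env, "my_logger"), &pid))
        return enif_make_atom(env, "undefined");
    return enif_make_pid(env, &pid);
}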
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
Uint sz;
@@ -947,11 +1136,6 @@ int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
return is_number(term);
}
-static ERTS_INLINE int is_proc_bound(ErlNifEnv* env)
-{
- return env->mod_nif != NULL;
-}
-
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
@@ -965,16 +1149,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
byte* raw_ptr;
}u;
- if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
- ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
- if (sb->is_writable) {
- ProcBin* pb = (ProcBin*) binary_val(sb->orig);
- ASSERT(pb->thing_word == HEADER_PROC_BIN);
- if (pb->flags) {
- erts_emasculate_writable_binary(pb);
- sb->is_writable = 0;
- }
- }
+ if (is_binary(bin_term)) {
+ ProcBin *pb = (ProcBin*) binary_val(bin_term);
+ if (pb->thing_word == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) pb;
+ pb = (ProcBin*) binary_val(sb->orig);
+ }
+ if (pb->thing_word == HEADER_PROC_BIN && pb->flags)
+ erts_emasculate_writable_binary(pb);
}
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
@@ -988,22 +1170,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
u.tmp->dtor = &aligned_binary_dtor;
env->tmp_obj_list = u.tmp;
}
- bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
- ADD_READONLY_CHECK(env, bin->data, bin->size);
+ ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
-static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
-{
- erts_free(obj->allocator, obj);
-}
-
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
- struct enif_tmp_obj_t* tobj;
- ErtsAlcType_t allocator;
ErlDrvSizeT sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
@@ -1011,7 +1185,6 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
if (is_nil(term)) {
bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
bin->size = 0;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
return 1;
}
@@ -1019,16 +1192,8 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
return 0;
}
- allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
- tobj->allocator = allocator;
- tobj->next = env->tmp_obj_list;
- tobj->dtor = &tmp_alloc_dtor;
- env->tmp_obj_list = tobj;
-
- bin->data = (unsigned char*) &tobj[1];
+ bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor);
bin->size = sz;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
erts_iolist_to_buf(term, (char*) bin->data, sz);
ADD_READONLY_CHECK(env, bin->data, bin->size);
@@ -1043,11 +1208,9 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
if (refbin == NULL) {
return 0; /* The NIF must take action */
}
- erts_refc_init(&refbin->refc, 1);
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = refbin;
return 1;
}
@@ -1081,14 +1244,10 @@ void enif_release_binary(ErlNifBinary* bin)
{
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
- ASSERT(bin->bin_term == THE_NON_VALUE);
- if (erts_refc_dectest(&refbin->refc, 0) == 0) {
- erts_bin_free(refbin);
- }
+ erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
#endif
}
@@ -1136,11 +1295,12 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
Sint size;
ErtsHeapFactory factory;
byte *bp = (byte*) data;
+ Uint32 flags = 0;
- ERTS_CT_ASSERT(ERL_NIF_BIN2TERM_SAFE == ERTS_DIST_EXT_BTT_SAFE);
-
- if (opts & ~ERL_NIF_BIN2TERM_SAFE) {
- return 0;
+ switch ((Uint32)opts) {
+ case 0: break;
+ case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break;
+ default: return 0;
}
if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
return 0;
@@ -1152,13 +1312,15 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
erts_factory_dummy_init(&factory);
}
- *term = erts_decode_ext(&factory, &bp, (Uint32)opts);
+ *term = erts_decode_ext(&factory, &bp, flags);
if (is_non_value(*term)) {
return 0;
}
- erts_factory_close(&factory);
- cache_env(dst_env);
+ if (size > 0) {
+ erts_factory_close(&factory);
+ cache_env(dst_env);
+ }
ASSERT(bp > data);
return bp - data;
@@ -1182,6 +1344,22 @@ int enif_compare(Eterm lhs, Eterm rhs)
return result;
}
+ErlNifUInt64 enif_hash(ErlNifHash type, Eterm term, ErlNifUInt64 salt)
+{
+ switch (type) {
+ case ERL_NIF_INTERNAL_HASH:
+ return make_internal_hash(term, (Uint32) salt);
+ case ERL_NIF_PHASH2:
+ /* It appears that make_hash2 doesn't always react to seasoning
+ * as well as it should. Therefore, let's make it ignore the salt
+ * value and declare salted uses of phash2 as unsupported.
+ */
+ return make_hash2(term) & ((1 << 27) - 1);
+ default:
+ return 0;
+ }
+}
+
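For illustration, both hash types accept any term, but only ERL_NIF_INTERNAL_HASH honors the salt; the PHASH2 result is bounded below 2^27 by the mask above. A sketch, assuming 'term' is in scope inside a NIF:

    ErlNifUInt64 h_int = enif_hash(ERL_NIF_INTERNAL_HASH, term, 42);
    ErlNifUInt64 h_p2  = enif_hash(ERL_NIF_PHASH2, term, 0); /* salt ignored */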
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
{
Eterm* ptr;
@@ -1228,39 +1406,51 @@ int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len,
Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
- if (bin->bin_term != THE_NON_VALUE) {
- return bin->bin_term;
- }
- else if (bin->ref_bin != NULL) {
- Binary* bptr = bin->ref_bin;
- ProcBin* pb;
- Eterm bin_term;
-
- /* !! Copy-paste from new_binary() !! */
- pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = bptr->orig_size;
- pb->next = MSO(env->proc).first;
- MSO(env->proc).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
-
- OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
- bin_term = make_binary(pb);
- if (erts_refc_read(&bptr->refc, 1) == 1) {
- /* Total ownership transfer */
- bin->ref_bin = NULL;
- bin->bin_term = bin_term;
- }
- return bin_term;
- }
- else {
- flush_env(env);
- bin->bin_term = new_binary(env->proc, bin->data, bin->size);
- cache_env(env);
- return bin->bin_term;
+ Eterm bin_term;
+
+ if (bin->ref_bin != NULL) {
+ Binary* binary = bin->ref_bin;
+
+ /* If the binary is smaller than the heap binary limit we'll return a
+ * heap binary to reduce the number of small refc binaries in the
+ * system. We can't simply release the refc binary right away however;
+ * the documentation states that the binary should be considered
+ * read-only from this point on, which implies that it should still be
+ * readable.
+ *
+ * We could keep it alive until we return by adding it to the temporary
+ * object list, but that requires an off-heap allocation which is
+ * potentially quite slow, so we create a dummy ProcBin instead and
+ * rely on the next minor GC to get rid of it. */
+ if (bin->size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size));
+ hb->thing_word = header_heap_bin(bin->size);
+ hb->size = bin->size;
+
+ sys_memcpy(hb->data, bin->data, bin->size);
+
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+
+ bin_term = make_binary(hb);
+ } else {
+ bin_term = erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+ }
+
+ /* Our (possibly shared) ownership has been transferred to the term. */
+ bin->ref_bin = NULL;
+ } else {
+ flush_env(env);
+ bin_term = new_binary(env->proc, bin->data, bin->size);
+ cache_env(env);
}
+
+ return bin_term;
}
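The refc branch above completes the usual ownership handoff: a NIF allocates a refc binary, fills it, and transfers it to a term, after which the ErlNifBinary is read-only and must not be released. A sketch, with 'payload' and 'payload_size' as assumed inputs:

    ErlNifBinary bin;
    if (!enif_alloc_binary(payload_size, &bin))
        return enif_make_badarg(env);
    memcpy(bin.data, payload, payload_size);
    /* bin.ref_bin is cleared by the call; the term now owns the data. */
    return enif_make_binary(env, &bin);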
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
@@ -1298,22 +1488,15 @@ Eterm enif_make_badarg(ErlNifEnv* env)
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
- Process *c_p;
-
- execution_state(env, &c_p, NULL);
-
env->exception_thrown = 1;
- c_p->fvalue = reason;
- BIF_ERROR(c_p, EXC_ERROR);
+ env->proc->fvalue = reason;
+ BIF_ERROR(env->proc, EXC_ERROR);
}
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
- if (env->exception_thrown && reason != NULL) {
- Process *c_p;
- execution_state(env, &c_p, NULL);
- *reason = c_p->fvalue;
- }
+ if (env->exception_thrown && reason != NULL)
+ *reason = env->proc->fvalue;
return env->exception_thrown;
}
@@ -1581,6 +1764,9 @@ int enif_make_existing_atom_len(ErlNifEnv* env, const char* name, size_t len,
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
va_list ap;
@@ -1588,7 +1774,9 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
*hp++ = make_arityval(cnt);
va_start(ap,cnt);
while (cnt--) {
- *hp++ = va_arg(ap,Eterm);
+ Eterm elem = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, elem, ++nr, "tuple");
+ *hp++ = elem;
}
va_end(ap);
return ret;
@@ -1596,12 +1784,16 @@ ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt+1);
Eterm ret = make_tuple(hp);
const Eterm* src = arr;
*hp++ = make_arityval(cnt);
while (cnt--) {
+ ASSERT_IN_ENV(env, *src, ++nr, "tuple");
*hp++ = *src++;
}
return ret;
@@ -1612,6 +1804,8 @@ ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
Eterm* hp = alloc_heap(env,2);
Eterm ret = make_list(hp);
+ ASSERT_IN_ENV(env, car, 0, "head of list cell");
+ ASSERT_IN_ENV(env, cdr, 0, "tail of list cell");
CAR(hp) = car;
CDR(hp) = cdr;
return ret;
@@ -1623,6 +1817,9 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
return NIL;
}
else {
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
@@ -1630,8 +1827,10 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
va_start(ap,cnt);
while (cnt--) {
+ Eterm term = va_arg(ap,Eterm);
*last = make_list(hp);
- *hp = va_arg(ap,Eterm);
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1643,14 +1842,19 @@ ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int nr = 0;
+#endif
Eterm* hp = alloc_heap(env,cnt*2);
Eterm ret = make_list(hp);
Eterm* last = &ret;
const Eterm* src = arr;
while (cnt--) {
+ Eterm term = *src++;
*last = make_list(hp);
- *hp = *src++;
+ ASSERT_IN_ENV(env, term, ++nr, "list");
+ *hp = term;
last = ++hp;
++hp;
}
@@ -1674,7 +1878,7 @@ ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
{
- Eterm* hp = alloc_heap(env, REF_THING_SIZE);
+ Eterm* hp = alloc_heap(env, ERTS_REF_THING_SIZE);
return erts_make_ref_in_buffer(hp);
}
@@ -1683,13 +1887,9 @@ void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
driver_system_info(sip, si_size);
}
-int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
- Eterm *listptr, ret = NIL, *hp;
-
- if (is_nil(term)) {
- *list = term;
- return 1;
- }
+int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)
+{
+ Eterm *listptr, ret, *hp;
ret = NIL;
@@ -1734,18 +1934,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
if (scheduler > 0)
return !!erts_proc_lookup(proc->pid);
else {
-#ifdef ERTS_SMP
Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
ERTS_P2P_FLG_INC_REFC);
if (rp)
erts_proc_dec_refc(rp);
return !!rp;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1761,17 +1954,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
if (scheduler > 0)
return !!erts_port_lookup(port->port_id, iflags);
else {
-#ifdef ERTS_SMP
Port *prt = erts_thr_port_lookup(port->port_id, iflags);
if (prt)
erts_port_dec_refc(prt);
return !!prt;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1853,6 +2039,12 @@ ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); }
int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); }
void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }
+
+char* enif_mutex_name(ErlNifMutex *mtx) {return erl_drv_mutex_name(mtx); }
+char* enif_cond_name(ErlNifCond *cnd) { return erl_drv_cond_name(cnd); }
+char* enif_rwlock_name(ErlNifRWLock* rwlck) { return erl_drv_rwlock_name(rwlck); }
+char* enif_thread_name(ErlNifTid tid) { return erl_drv_thread_name(tid); }
+
int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
@@ -1875,16 +2067,21 @@ enif_convert_time_unit(ErlNifTime val,
(int) to);
}
-int enif_fprintf(void* filep, const char* format, ...)
+int enif_fprintf(FILE* filep, const char* format, ...)
{
int ret;
va_list arglist;
va_start(arglist, format);
- ret = erts_vfprintf((FILE*)filep, format, arglist);
+ ret = erts_vfprintf(filep, format, arglist);
va_end(arglist);
return ret;
}
+int enif_vfprintf(FILE* filep, const char *format, va_list ap)
+{
+ return erts_vfprintf(filep, format, ap);
+}
+
int enif_snprintf(char *buffer, size_t size, const char* format, ...)
{
int ret;
@@ -1895,42 +2092,19 @@ int enif_snprintf(char *buffer, size_t size, const char* format, ...)
return ret;
}
+int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap)
+{
+ return erts_vsnprintf(buffer, size, format, ap);
+}
+
+
/***********************************************************
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
-
-struct enif_resource_type_t
-{
- struct enif_resource_type_t* next; /* list of all resource types */
- struct enif_resource_type_t* prev;
- struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
- ErlNifResourceDtor* dtor; /* user destructor function */
- erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
- +1 for active erl_module_nif */
- Eterm module;
- Eterm name;
-};
-
/* dummy node in circular list */
struct enif_resource_type_t resource_type_list;
-typedef struct enif_resource_t
-{
- struct enif_resource_type_t* type;
-#ifdef DEBUG
- erts_refc_t nif_refc;
-# ifdef ARCH_32
- byte align__[4];
-# endif
-#endif
-
- char data[1];
-}ErlNifResource;
-
-#define SIZEOF_ErlNifResource(SIZE) (offsetof(ErlNifResource,data) + (SIZE))
-#define DATA_TO_RESOURCE(PTR) ((ErlNifResource*)((char*)(PTR) - offsetof(ErlNifResource,data)))
-
static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
{
ErlNifResourceType* type;
@@ -1955,10 +2129,10 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(lib->handle != NULL);
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
- if (lib->entry != NULL && lib->entry->unload != NULL) {
+ if (lib->entry.unload != NULL) {
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, lib, NULL);
- lib->entry->unload(&msg_env.env, lib->priv_data);
+ lib->entry.unload(&msg_env.env, lib->priv_data);
post_nif_noproc(&msg_env);
}
if (!erts_is_static_nif(lib->handle))
@@ -1992,24 +2166,23 @@ struct opened_resource_type
ErlNifResourceFlags op;
ErlNifResourceType* type;
- ErlNifResourceDtor* new_dtor;
+ ErlNifResourceTypeInit new_callbacks;
};
static struct opened_resource_type* opened_rt_list = NULL;
-ErlNifResourceType*
-enif_open_resource_type(ErlNifEnv* env,
- const char* module_str,
- const char* name_str,
- ErlNifResourceDtor* dtor,
- ErlNifResourceFlags flags,
- ErlNifResourceFlags* tried)
+static
+ErlNifResourceType* open_resource_type(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried,
+ size_t sizeof_init)
{
ErlNifResourceType* type = NULL;
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(module_str == NULL); /* for now... */
+ ASSERT(erts_thr_progress_is_blocking());
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -2043,7 +2216,9 @@ enif_open_resource_type(ErlNifEnv* env,
sizeof(struct opened_resource_type));
ort->op = op;
ort->type = type;
- ort->new_dtor = dtor;
+ sys_memzero(&ort->new_callbacks, sizeof(ErlNifResourceTypeInit));
+ ASSERT(sizeof_init > 0 && sizeof_init <= sizeof(ErlNifResourceTypeInit));
+ sys_memcpy(&ort->new_callbacks, init, sizeof_init);
ort->next = opened_rt_list;
opened_rt_list = ort;
}
@@ -2053,6 +2228,31 @@ enif_open_resource_type(ErlNifEnv* env,
return type;
}
+ErlNifResourceType*
+enif_open_resource_type(ErlNifEnv* env,
+ const char* module_str,
+ const char* name_str,
+ ErlNifResourceDtor* dtor,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ ErlNifResourceTypeInit init = {dtor, NULL};
+ ASSERT(module_str == NULL); /* for now... */
+ return open_resource_type(env, name_str, &init, flags, tried,
+ sizeof(init));
+}
+
+ErlNifResourceType*
+enif_open_resource_type_x(ErlNifEnv* env,
+ const char* name_str,
+ const ErlNifResourceTypeInit* init,
+ ErlNifResourceFlags flags,
+ ErlNifResourceFlags* tried)
+{
+ return open_resource_type(env, name_str, init, flags, tried,
+ env->mod_nif->entry.sizeof_ErlNifResourceTypeInit);
+}
+
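How a NIF library might use the extended open call introduced above, registering a down callback alongside the destructor during load(). The member order {dtor, stop, down} follows the {dtor, NULL} initializer used by enif_open_resource_type above; the callback names are illustrative.

static void my_dtor(ErlNifEnv* env, void* obj);
static void my_down(ErlNifEnv* env, void* obj, ErlNifPid* pid, ErlNifMonitor* mon);

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    ErlNifResourceTypeInit init = {my_dtor, NULL /* stop */, my_down};
    ErlNifResourceType* rt =
        enif_open_resource_type_x(env, "my_resource", &init,
                                  ERL_NIF_RT_CREATE, NULL);
    return rt == NULL; /* non-zero return fails the load */
}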
static void commit_opened_resource_types(struct erl_module_nif* lib)
{
while (opened_rt_list) {
@@ -2071,7 +2271,9 @@ static void commit_opened_resource_types(struct erl_module_nif* lib)
}
type->owner = lib;
- type->dtor = ort->new_dtor;
+ type->dtor = ort->new_callbacks.dtor;
+ type->stop = ort->new_callbacks.stop;
+ type->down = ort->new_callbacks.down;
if (type->dtor != NULL) {
erts_refc_inc(&lib->rt_dtor_cnt, 1);
@@ -2097,12 +2299,103 @@ static void rollback_opened_resource_types(void)
}
}
+#ifdef ARCH_64
+# define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 63)
+#else
+# define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 31)
+#endif
+#define ERTS_RESOURCE_REFC_MASK (~ERTS_RESOURCE_DYING_FLAG)
+
+static ERTS_INLINE void
+rmon_set_dying(ErtsResourceMonitors *rms)
+{
+ rms->refc |= ERTS_RESOURCE_DYING_FLAG;
+}
+
+static ERTS_INLINE int
+rmon_is_dying(ErtsResourceMonitors *rms)
+{
+ return !!(rms->refc & ERTS_RESOURCE_DYING_FLAG);
+}
+
+static ERTS_INLINE void
+rmon_refc_inc(ErtsResourceMonitors *rms)
+{
+ rms->refc++;
+}
+
+static ERTS_INLINE Uint
+rmon_refc_dec_read(ErtsResourceMonitors *rms)
+{
+ Uint res;
+ ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
+ res = --rms->refc;
+ return res & ERTS_RESOURCE_REFC_MASK;
+}
+
+static ERTS_INLINE void
+rmon_refc_dec(ErtsResourceMonitors *rms)
+{
+ ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
+ --rms->refc;
+}
-static void nif_resource_dtor(Binary* bin)
+static ERTS_INLINE Uint
+rmon_refc_read(ErtsResourceMonitors *rms)
{
- ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ return rms->refc & ERTS_RESOURCE_REFC_MASK;
+}
+
+static void dtor_demonitor(ErtsMonitor* mon, void* context)
+{
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(is_internal_pid(mon->other.item));
+
+ erts_proc_sig_send_demonitor(mon);
+}
+
+# define NIF_RESOURCE_DTOR &nif_resource_dtor
+
+static int nif_resource_dtor(Binary* bin)
+{
+ ErtsResource* resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+
+ if (resource->monitors) {
+ ErtsResourceMonitors* rm = resource->monitors;
+ int kill;
+ ErtsMonitor *root;
+ Uint refc;
+
+ ASSERT(type->down);
+ erts_mtx_lock(&rm->lock);
+ ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
+ kill = !rmon_is_dying(rm);
+ if (kill) {
+ rmon_set_dying(rm);
+ root = rm->root;
+ rm->root = NULL;
+ }
+ refc = rmon_refc_read(rm);
+ erts_mtx_unlock(&rm->lock);
+
+ if (kill)
+ erts_monitor_tree_foreach_delete(&root,
+ dtor_demonitor,
+ NULL);
+
+ /*
+ * If resource->monitors->refc != 0 there are
+ * outstanding references to the resource from
+         * monitors that have not been removed yet.
+         * nif_resource_dtor() will be called again when this
+         * reference count reaches zero.
+ */
+ if (refc != 0)
+ return 0; /* we'll be back... */
+ erts_mtx_destroy(&rm->lock);
+ }
if (type->dtor != NULL) {
struct enif_msg_environment_t msg_env;
@@ -2117,85 +2410,230 @@ static void nif_resource_dtor(Binary* bin)
steal_resource_type(type);
erts_free(ERTS_ALC_T_NIF, type);
}
+ return 1;
+}
+
+void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
+ int is_direct_call)
+{
+ struct enif_msg_environment_t msg_env;
+ ASSERT(resource->type->stop);
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->stop(&msg_env.env, resource->data, e, is_direct_call);
+ post_nif_noproc(&msg_env);
+}
+
+void erts_nif_demonitored(ErtsResource* resource)
+{
+ ErtsResourceMonitors* rmp = resource->monitors;
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ int free_me;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_mtx_lock(&rmp->lock);
+ free_me = ((rmon_refc_dec_read(rmp) == 0) & !!rmon_is_dying(rmp));
+ erts_mtx_unlock(&rmp->lock);
+
+ if (free_me)
+ erts_bin_free(&bin->binary);
+}
+
+void erts_fire_nif_monitor(ErtsMonitor *tmon)
+{
+ ErtsResource* resource;
+ ErtsMonitorData *mdp;
+ ErtsMonitor *omon;
+ ErtsBinary* bin;
+ struct enif_msg_environment_t msg_env;
+ ErlNifPid nif_pid;
+ ErlNifMonitor nif_monitor;
+ ErtsResourceMonitors* rmp;
+ Uint mrefc, brefc;
+ int active, is_dying;
+
+ ASSERT(tmon->type == ERTS_MON_TYPE_RESOURCE);
+ ASSERT(erts_monitor_is_target(tmon));
+
+ resource = tmon->other.ptr;
+ bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ rmp = resource->monitors;
+
+ mdp = erts_monitor_to_data(tmon);
+ omon = &mdp->origin;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_mtx_lock(&rmp->lock);
+
+ mrefc = rmon_refc_dec_read(rmp);
+ is_dying = rmon_is_dying(rmp);
+ active = !is_dying && erts_monitor_is_in_table(omon);
+
+ if (active) {
+ erts_monitor_tree_delete(&rmp->root, omon);
+ brefc = (Uint) erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0);
+ }
+
+ erts_mtx_unlock(&rmp->lock);
+
+ if (!active) {
+ ASSERT(!is_dying || erts_refc_read(&bin->binary.intern.refc, 0) == 0);
+ if (is_dying && mrefc == 0)
+ erts_bin_free(&bin->binary);
+ erts_monitor_release(tmon);
+ }
+ else {
+ if (brefc > 0) {
+ ASSERT(is_internal_pid(omon->other.item));
+ erts_ref_to_driver_monitor(mdp->ref, &nif_monitor);
+ nif_pid.pid = omon->other.item;
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
+ post_nif_noproc(&msg_env);
+
+ erts_bin_release(&bin->binary);
+ }
+
+ erts_monitor_release_both(mdp);
+ }
}
-void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
+void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
{
- Binary* bin = erts_create_magic_binary_x(SIZEOF_ErlNifResource(size),
- &nif_resource_dtor,
- 1); /* unaligned */
- ErlNifResource* resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
+ size_t magic_sz = offsetof(ErtsResource,data);
+ Binary* bin;
+ ErtsResource* resource;
+ size_t monitors_offs;
+
+ if (type->down) {
+ /* Put ErtsResourceMonitors after user data and properly aligned */
+ monitors_offs = ((data_sz + ERTS_ALLOC_ALIGN_BYTES - 1)
+ & ~((size_t)ERTS_ALLOC_ALIGN_BYTES - 1));
+ magic_sz += monitors_offs + sizeof(ErtsResourceMonitors);
+ }
+ else {
+ ERTS_UNDEF(monitors_offs, 0);
+ magic_sz += data_sz;
+ }
+ bin = erts_create_magic_binary_x(magic_sz, NIF_RESOURCE_DTOR,
+ ERTS_ALC_T_BINARY,
+ 1); /* unaligned */
+ resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
- erts_refc_inc(&bin->refc, 1);
+ erts_refc_inc(&bin->intern.refc, 1);
#ifdef DEBUG
erts_refc_init(&resource->nif_refc, 1);
#endif
erts_refc_inc(&resource->type->refc, 2);
+ if (type->down) {
+ resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
+ erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ resource->monitors->root = NULL;
+ resource->monitors->refc = 0;
+ resource->monitors->user_data_sz = data_sz;
+ }
+ else {
+ resource->monitors = NULL;
+ }
return resource->data;
}
void enif_release_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
- if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
- erts_bin_free(&bin->binary);
- }
+ erts_bin_release(&bin->binary);
}
void enif_keep_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
- erts_refc_inc(&bin->binary.refc, 2);
+ erts_refc_inc(&bin->binary.intern.refc, 2);
+}
+
+Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
+{
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
+ return erts_mk_magic_ref(hpp, oh, &bin->binary);
}
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
+ Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
+ return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
}
ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
const void* data, size_t size)
{
- Eterm bin = enif_make_resource(env, obj);
- ProcBin* pb = (ProcBin*) binary_val(bin);
- pb->bytes = (byte*) data;
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ ErlOffHeap *ohp = &MSO(env->proc);
+ Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
+ ProcBin* pb = (ProcBin *) hp;
+
+ pb->thing_word = HEADER_PROC_BIN;
pb->size = size;
- return bin;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header*) pb;
+ pb->val = &bin->binary;
+ pb->bytes = (byte*) data;
+ pb->flags = 0;
+
+ OH_OVERHEAD(ohp, size / sizeof(Eterm));
+ erts_refc_inc(&bin->binary.intern.refc, 1);
+
+ return make_binary(hp);
}
int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* type,
void** objp)
{
- ProcBin* pb;
Binary* mbin;
- ErlNifResource* resource;
- if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- return 0;
+ ErtsResource* resource;
+ if (is_internal_magic_ref(term))
+ mbin = erts_magic_ref2bin(term);
+ else {
+ Eterm *hp;
+ if (!is_binary(term))
+ return 0;
+ hp = binary_val(term);
+ if (thing_subtag(*hp) != REFC_BINARY_SUBTAG)
+ return 0;
+ /*
+ if (((ProcBin *) hp)->size != 0) {
+ return 0; / * Or should we allow "resource binaries" as handles? * /
+ }
+ */
+ mbin = ((ProcBin *) hp)->val;
+ if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
+ return 0;
}
- pb = (ProcBin*) binary_val(term);
- /*if (pb->size != 0) {
- return 0; / * Or should we allow "resource binaries" as handles? * /
- }*/
- mbin = pb->val;
- resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
- if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
+ resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != NIF_RESOURCE_DTOR
|| resource->type != type) {
return 0;
}
@@ -2205,9 +2643,14 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
size_t enif_sizeof_resource(void* obj)
{
- ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
- return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
+ ErtsResource* resource = DATA_TO_RESOURCE(obj);
+ if (resource->monitors) {
+ return resource->monitors->user_data_sz;
+ }
+ else {
+ Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
+ return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErtsResource,data);
+ }
}
@@ -2264,184 +2707,36 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(proc) == 0;
}
-/*
- * NIF exports need a few more items than the Export struct provides,
- * including the erl_module_nif* and a NIF function pointer, so the
- * NifExport below adds those. The Export member must be first in the
- * struct. The saved_current, exception_thrown, saved_argc, rootset_extra, and
- * rootset members are used to track the MFA, any pending exception, and
- * arguments of the top NIF in case a chain of one or more
- * enif_schedule_nif() calls results in an exception, since in that case
- * the original MFA and registers have to be restored before returning to
- * Erlang to ensure stacktrace information associated with the exception is
- * correct.
- */
-typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-
-typedef struct {
- Export exp;
- struct erl_module_nif* m;
- NativeFunPtr fp;
- BeamInstr *saved_current;
- int exception_thrown;
- int saved_argc;
- int rootset_extra;
- Eterm rootset[1];
-} NifExport;
-
-/*
- * If a process has saved arguments, they need to be part of the GC
- * rootset. The function below is called from setup_rootset() in
- * erl_gc.c. This function is declared in erl_process.h. Any exception term
- * saved in the NifExport is also made part of the GC rootset here; it
- * always resides in rootset[0].
- */
-int
-erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
-{
- NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- int gc = ep && (ep->saved_argc > 0 || ep->rootset[0] != NIL);
-
- if (gc) {
- *objv = ep->rootset;
- *nobj = 1 + ep->saved_argc;
- }
- return gc;
-}
-
-/*
- * Allocate a NifExport and set it in proc specific data
- */
-static NifExport*
-allocate_nif_sched_data(Process* proc, int argc)
-{
- NifExport* ep;
- size_t total;
- int i;
-
- total = sizeof(NifExport) + argc*sizeof(Eterm);
- ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
- sys_memset((void*) ep, 0, total);
- ep->rootset_extra = argc;
- ep->rootset[0] = NIL;
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- ep->exp.addressv[i] = &ep->exp.code[3];
- }
- ep->exp.code[3] = (BeamInstr) em_call_nif;
- (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ep);
- return ep;
-}
-
static ERTS_INLINE void
-destroy_nif_export(NifExport *nif_export)
+nif_export_cleanup_nif_mod(NifExport *ep)
{
- erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
+ if (erts_refc_dectest(&ep->m->rt_dtor_cnt, 0) == 0 && ep->m->mod == NULL)
+ close_lib(ep->m);
+ ep->m = NULL;
}
void
-erts_destroy_nif_export(void *nif_export)
+erts_nif_export_cleanup_nif_mod(NifExport *ep)
{
- destroy_nif_export((NifExport *) nif_export);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Initialize a NifExport struct. Create it if needed and store it in the
- * proc. The direct_fp function is what will be invoked by op_call_nif, and
- * the indirect_fp function, if not NULL, is what the direct_fp function
- * will call. If the allocated NifExport isn't enough to hold all of argv,
- * allocate a larger one. Save MFA and registers only if the need_save
- * parameter is true.
- */
-static ERL_NIF_TERM
-init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
- int need_save, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE void
+nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
{
- Process* proc;
- Eterm* reg;
- NifExport* ep;
- int i, scheduler;
-
- execution_state(env, &proc, &scheduler);
-
- ASSERT(scheduler);
-
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- reg = erts_proc_sched_data(proc)->x_reg_array;
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- if (!ep)
- ep = allocate_nif_sched_data(proc, argc);
- else if (need_save && ep->rootset_extra < argc) {
- NifExport* new_ep = allocate_nif_sched_data(proc, argc);
- destroy_nif_export(ep);
- ep = new_ep;
- }
- if (env->exception_thrown) {
- ep->exception_thrown = 1;
- ep->rootset[0] = proc->fvalue;
- } else {
- ep->exception_thrown = 0;
- ep->rootset[0] = NIL;
- }
- if (scheduler > 0)
- ERTS_VBUMP_ALL_REDS(proc);
- for (i = 0; i < argc; i++) {
- if (need_save)
- ep->rootset[i+1] = reg[i];
- reg[i] = (Eterm) argv[i];
- }
- if (need_save) {
- ASSERT(proc->current);
- ep->saved_current = proc->current;
- ep->saved_argc = argc;
- }
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[0] = (BeamInstr) proc->current[0];
- ep->exp.code[1] = (BeamInstr) proc->current[1];
- ep->exp.code[2] = argc;
- ep->exp.code[4] = (BeamInstr) direct_fp;
- ep->m = env->mod_nif;
- ep->fp = indirect_fp;
- proc->freason = TRAP;
- proc->arity = argc;
- return THE_NON_VALUE;
+ erts_nif_export_restore(c_p, ep, res);
+ ASSERT(ep->m);
+ nif_export_cleanup_nif_mod(ep);
}
-/*
- * Restore saved MFA and registers. Registers are restored only when the
- * exception flag is true.
- */
-static void
-restore_nif_mfa(Process* proc, NifExport* ep, int exception)
-{
- int i;
- ERTS_SMP_LC_ASSERT(!(proc->static_flags
- & ERTS_STC_FLG_SHADOW_PROC));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
- & ERTS_PROC_LOCK_MAIN);
-
- ASSERT(ep->saved_current != &ep->exp.code[0]);
- proc->current = ep->saved_current;
- ep->saved_current = NULL;
- if (exception) {
- Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
- for (i = 0; i < ep->saved_argc; i++)
- reg[i] = ep->rootset[i+1];
- }
- ep->saved_argc = 0;
-}
-
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Finalize a dirty NIF call. This function is scheduled to cause the VM to
* switch the process off a dirty scheduler thread and back onto a regular
* scheduler thread, and then return the result from the dirty NIF. It also
* restores the original NIF MFA when necessary based on the value of
- * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * ep->func set by execute_dirty_nif via init_nif_sched_data -- non-NULL
* means restore, NULL means do not restore.
*/
static ERL_NIF_TERM
@@ -2456,9 +2751,7 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(!ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 0);
+ nif_export_restore(proc, ep, argv[0]);
return argv[0];
}
@@ -2468,149 +2761,102 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
+ ERL_NIF_TERM ret;
Process* proc;
NifExport* ep;
+ Eterm exception;
execution_state(env, &proc, NULL);
+ ASSERT(argc == 1);
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
- ASSERT(ep->exception_thrown);
- if (ep->fp)
- restore_nif_mfa(proc, ep, 1);
- return enif_raise_exception(env, ep->rootset[0]);
+ exception = argv[0]; /* argv overwritten by restore below... */
+ nif_export_cleanup_nif_mod(ep);
+ ret = enif_raise_exception(env, exception);
+
+ /* Restore orig info for error and clear nif export in handle_error() */
+ proc->freason |= EXF_RESTORE_NIF;
+ return ret;
}
/*
- * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
- * then check the result and schedule the appropriate finalizer function
- * where needed. Also restore the original NIF MFA when appropriate.
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute.
+ * The dirty scheduler thread type (CPU or I/O) is indicated in flags
+ * parameter.
*/
-static ERL_NIF_TERM
-execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
+ Eterm func_name, int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NativeFunPtr fp;
- NifExport* ep;
- ERL_NIF_TERM result;
-
- execution_state(env, &proc, NULL);
- fp = (NativeFunPtr) proc->current[6];
-
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
-
- /*
- * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep && fp);
-
- ep->fp = NULL;
- erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC));
+ ASSERT(is_atom(func_name));
+ ASSERT(fp);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
- result = (*fp)(env, argc, argv);
+ execution_state(env, &proc, NULL);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ (void) erts_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
+ ? ERTS_PSFLG_DIRTY_CPU_PROC
+ : ERTS_PSFLG_DIRTY_IO_PROC));
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- /*
- * If no more NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF calling
- * context. Reuse fp essentially as a boolean for this, passing it to
- * init_nif_sched_data below. Both dirty_nif_exception and
- * dirty_nif_finalizer then check ep->fp to decide whether or not to
- * restore the original calling context.
- */
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- if (ep->fp)
- fp = NULL;
- if (is_non_value(result) || env->exception_thrown) {
- if (proc->freason != TRAP) {
- return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
- } else {
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, 1);
- return THE_NON_VALUE;
- }
- }
- else
- return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+ return schedule(env, fp, NULL, proc->current->module, func_name, argc, argv);
}
-/*
- * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
- * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
- * type (CPU or I/O) is indicated in flags parameter.
- */
static ERTS_INLINE ERL_NIF_TERM
-schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
+ int argc, const ERL_NIF_TERM argv[])
{
- ERL_NIF_TERM result;
- erts_aint32_t act, dirty_flag;
- Process* proc;
+ Process *proc;
+ NifExport *ep;
+ Eterm mod, func;
NativeFunPtr fp;
- NifExport* ep;
- int need_save, scheduler;
-
- execution_state(env, &proc, &scheduler);
- if (scheduler <= 0) {
- ASSERT(scheduler < 0);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
- }
- fp = (NativeFunPtr) proc->current[6];
+ execution_state(env, &proc, NULL);
- ASSERT(fp);
+ /*
+ * Called in order to schedule statically determined
+ * dirty NIF calls...
+ *
+     * Note that 'current' does not point into a NifExport
+     * structure; it points into a structure with similar
+     * parts that is located in code.
+ */
- ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ mod = proc->current->module;
+ func = proc->current->function;
+ fp = (NativeFunPtr) ep->func;
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+ ASSERT(is_atom(mod) && is_atom(func));
+ ASSERT(fp);
- act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
- if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
- else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
- /* clear other flag... */
- if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
- else
- dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
- erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
- }
+ (void) erts_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC),
+ dirty_psflg);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
- result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
- if (scheduler <= 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- return result;
+ return schedule(env, fp, NULL, mod, func, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_IO_PROC, argc, argv);
}
static ERL_NIF_TERM
-schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
/*
* NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
@@ -2626,22 +2872,42 @@ execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ERL_NIF_TERM result;
execution_state(env, &proc, NULL);
- fp = (NativeFunPtr) proc->current[6];
- ASSERT(!env->exception_thrown);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ep = ErtsContainerStruct(proc->current, NifExport, exp.info.mfa);
+ fp = ep->func;
ASSERT(ep);
- ep->fp = NULL;
+ ASSERT(!env->exception_thrown);
+
+ fp = (NativeFunPtr) ep->func;
+
+#ifdef DEBUG
+ ep->func = ERTS_DBG_NIF_NOT_SCHED_MARKER;
+#endif
+
result = (*fp)(env, argc, argv);
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- /*
- * If no NIFs were scheduled by the native call via
- * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
- * which case we need to restore the original NIF MFA.
- */
- if (ep->fp == NULL)
- restore_nif_mfa(proc, ep, env->exception_thrown);
+
+ ASSERT(ep == ERTS_PROC_GET_NIF_TRAP_EXPORT(proc));
+
+ if (is_value(result) || proc->freason != TRAP) {
+ /* Done (not rescheduled)... */
+ ASSERT(ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER);
+ if (!env->exception_thrown)
+ nif_export_restore(proc, ep, result);
+ else {
+ nif_export_cleanup_nif_mod(ep);
+ /*
+ * Restore orig info for error and clear nif
+ * export in handle_error()
+ */
+ proc->freason |= EXF_RESTORE_NIF;
+ }
+ }
+
+#ifdef DEBUG
+ if (ep->func == ERTS_DBG_NIF_NOT_SCHED_MARKER)
+ ep->func = NULL;
+#endif
+
return result;
}
@@ -2651,9 +2917,8 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
int argc, const ERL_NIF_TERM argv[])
{
Process* proc;
- NifExport* ep;
ERL_NIF_TERM fun_name_atom, result;
- int need_save, scheduler;
+ int scheduler;
if (argc > MAX_ARG)
return enif_make_badarg(env);
@@ -2665,40 +2930,20 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
if (scheduler <= 0) {
if (scheduler == 0)
enif_make_badarg(env);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
}
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- need_save = (ep == NULL || !ep->saved_current);
-
- if (flags) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- NativeFunPtr sched_fun;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
- sched_fun = schedule_dirty_io_nif;
- else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- sched_fun = schedule_dirty_cpu_nif;
- else {
- result = enif_make_badarg(env);
- goto done;
- }
- result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
-#else
- result = enif_make_badarg(env);
-#endif
- goto done;
+ if (flags == 0)
+ result = schedule(env, execute_nif, fp, proc->current->module,
+ fun_name_atom, argc, argv);
+ else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
+ result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
}
else
- result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-
- ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
- ep->exp.code[1] = (BeamInstr) fun_name_atom;
+ result = enif_make_badarg(env);
-done:
if (scheduler < 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
return result;
}
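The flag handling above supports the standard yielding pattern for long-running NIFs: do a bounded chunk of work, consume timeslice, and reschedule with flags == 0 to stay on a normal scheduler (or with a dirty-job flag to move to a dirty one). A sketch; the work helpers are assumed.

static ERL_NIF_TERM long_task(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    while (have_more_work(argv)) {           /* assumed helper */
        do_one_chunk(argv);                  /* assumed helper */
        if (enif_consume_timeslice(env, 5))  /* slice exhausted: yield */
            return enif_schedule_nif(env, "long_task", 0,
                                     long_task, argc, argv);
    }
    return enif_make_atom(env, "done");
}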
@@ -2711,14 +2956,17 @@ enif_thread_type(void)
if (!esdp)
return ERL_NIF_THR_UNDEFINED;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
return ERL_NIF_THR_NORMAL_SCHEDULER;
-
- if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp))
+ case ERTS_SCHED_DIRTY_CPU:
return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
-
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(esdp));
- return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
+ case ERTS_SCHED_DIRTY_IO:
+ return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return -1;
+ }
}
/* Maps */
@@ -2759,6 +3007,44 @@ ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
return make_flatmap(mp);
}
+int enif_make_map_from_arrays(ErlNifEnv *env,
+ ERL_NIF_TERM keys[],
+ ERL_NIF_TERM values[],
+ size_t cnt,
+ ERL_NIF_TERM *map_out)
+{
+ ErtsHeapFactory factory;
+ int succeeded;
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ size_t index = 0;
+
+ while (index < cnt) {
+ ASSERT_IN_ENV(env, keys[index], index, "key");
+ ASSERT_IN_ENV(env, values[index], index, "value");
+ index++;
+ }
+#endif
+
+ flush_env(env);
+
+ erts_factory_proc_prealloc_init(&factory, env->proc,
+ cnt * 2 + MAP_HEADER_FLATMAP_SZ + 1);
+
+ (*map_out) = erts_map_from_ks_and_vs(&factory, keys, values, cnt);
+ succeeded = (*map_out) != THE_NON_VALUE;
+
+ if (!succeeded) {
+ erts_factory_undo(&factory);
+ }
+
+ erts_factory_close(&factory);
+
+ cache_env(env);
+
+ return succeeded;
+}
+
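A quick sketch of the constructor above from inside a NIF body; as the code shows, the call returns 0 and undoes the factory when map construction fails (for example on duplicate keys). The atom and integer choices are illustrative.

    ERL_NIF_TERM keys[2]   = { enif_make_atom(env, "a"), enif_make_atom(env, "b") };
    ERL_NIF_TERM values[2] = { enif_make_int(env, 1), enif_make_int(env, 2) };
    ERL_NIF_TERM map;

    if (!enif_make_map_from_arrays(env, keys, values, 2, &map))
        return enif_make_badarg(env); /* e.g. duplicate keys */
    return map;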
int enif_make_map_put(ErlNifEnv* env,
Eterm map_in,
Eterm key,
@@ -2768,6 +3054,10 @@ int enif_make_map_put(ErlNifEnv* env,
if (!is_map(map_in)) {
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
*map_out = erts_maps_put(env->proc, key, value, map_in);
cache_env(env);
@@ -2802,6 +3092,10 @@ int enif_make_map_update(ErlNifEnv* env,
return 0;
}
+ ASSERT_IN_ENV(env, map_in, 0, "old map");
+ ASSERT_IN_ENV(env, key, 0, "key");
+ ASSERT_IN_ENV(env, value, 0, "value");
+
flush_env(env);
res = erts_maps_update(env->proc, key, value, map_in, map_out);
cache_env(env);
@@ -3003,22 +3297,540 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
return 0;
}
+int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
+ ErlNifMonitor* monitor)
+{
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+ Eterm tmp[ERTS_REF_THING_SIZE];
+ Eterm ref;
+ ErtsResourceMonitors *rm;
+ ErtsMonitorData *mdp;
+
+ ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
+ == NIF_RESOURCE_DTOR);
+ ASSERT(erts_refc_read(&ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->binary.intern.refc, 0) != 0);
+ ASSERT(!rsrc->monitors == !rsrc->type->down);
+
+ rm = rsrc->monitors;
+ if (!rm) {
+ ASSERT(!rsrc->type->down);
+ return -1;
+ }
+ ASSERT(rsrc->type->down);
+
+ ref = erts_make_ref_in_buffer(tmp);
+
+ mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref,
+ (Eterm) rsrc, target_pid->pid, NIL);
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ erts_monitor_tree_insert(&rm->root, &mdp->origin);
+ rmon_refc_inc(rm);
+ erts_mtx_unlock(&rm->lock);
+
+ if (!erts_proc_sig_send_monitor(&mdp->target, target_pid->pid)) {
+ /* Failed to send monitor signal; cleanup... */
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
+
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ erts_monitor_tree_delete(&rm->root, &mdp->origin);
+ rmon_refc_dec(rm);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 1) != 0);
+ erts_mtx_unlock(&rm->lock);
+ erts_monitor_release_both(mdp);
+
+ return 1;
+ }
+
+ if (monitor)
+ erts_ref_to_driver_monitor(ref,monitor);
+
+ return 0;
+}
+
+int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
+{
+ ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
+ ErtsResourceMonitors *rm;
+ ErtsMonitor *mon;
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
+ Eterm ref;
+
+ ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
+
+ ref = erts_driver_monitor_to_ref(ref_heap, monitor);
+
+ rm = rsrc->monitors;
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ mon = erts_monitor_tree_lookup(rm->root, ref);
+ if (mon)
+ erts_monitor_tree_delete(&rm->root, mon);
+ erts_mtx_unlock(&rm->lock);
+
+ if (!mon)
+ return 1;
+
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(is_internal_pid(mon->other.item));
+
+ erts_proc_sig_send_demonitor(mon);
+
+ return 0;
+}
+
+int enif_compare_monitors(const ErlNifMonitor *monitor1,
+ const ErlNifMonitor *monitor2)
+{
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
+}
+
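End to end, the monitoring API added above is used from a NIF roughly as below; 'rt' is assumed to be a resource type opened with a 'down' callback and 'MyState' an assumed user struct. Per the code above, a non-zero return means either no down callback (< 0) or a target that is no longer alive (> 0).

static ERL_NIF_TERM watch_self(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    void* obj = enif_alloc_resource(rt, sizeof(MyState));
    ERL_NIF_TERM term;
    ErlNifPid self;
    ErlNifMonitor mon;

    enif_self(env, &self);
    if (enif_monitor_process(env, obj, &self, &mon) != 0) {
        enif_release_resource(obj);
        return enif_make_badarg(env);
    }
    term = enif_make_resource(env, obj);
    enif_release_resource(obj); /* the term keeps the resource alive */
    return term;
}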
+ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
+{
+ ErlNifIOQueue *q;
+
+ if (opts != ERL_NIF_IOQ_NORMAL)
+ return NULL;
+
+ q = enif_alloc(sizeof(ErlNifIOQueue));
+ if (!q) return NULL;
+ erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
+
+ return q;
+}
+
+void enif_ioq_destroy(ErlNifIOQueue *q)
+{
+ erts_ioq_clear(q);
+ enif_free(q);
+}
+
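The queue lifecycle added here simply pairs create and destroy around use; the enqueue/peek/dequeue calls named in the comment are other members of the enif_ioq_* family. A sketch from inside a NIF:

    ErlNifIOQueue *q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
    if (q == NULL)
        return enif_make_badarg(env); /* bad opts or out of memory */
    /* ... enif_ioq_enqv() / enif_ioq_peek() / enif_ioq_deq() ... */
    enif_ioq_destroy(q);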
+/* If the iovec was preallocated (Stack or otherwise) it needs to be marked as
+ * such to perform a proper free. */
+#define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
+
+void enif_free_iovec(ErlNifIOVec *iov)
+{
+ int i;
+ /* Decrement the refc of all the binaries */
+ for (i = 0; i < iov->iovcnt; i++) {
+ Binary *bptr = ((Binary**)iov->ref_bins)[i];
+ /* bptr can be null if enq_binary was used */
+ if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ }
+
+ if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iov);
+ }
+}
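
A hedged sketch of the ownership rule implied above: an iovec produced without an environment holds binary references of its own and must be released with enif_free_iovec, while env-attached iovecs are freed with their environment. enif_inspect_iovec is defined further down in this diff; the iolist term is assumed to be kept alive elsewhere (e.g. in a process-independent environment).

static void detached_iovec_demo(ERL_NIF_TERM iolist)
{
    ERL_NIF_TERM tail;
    ErlNifIOVec vec, *iovec = &vec;

    /* env == NULL: the call takes its own references on the binaries. */
    if (enif_inspect_iovec(NULL, 64, iolist, &tail, &iovec)) {
        /* ... hand the iovec to another thread, and when done: */
        enif_free_iovec(iovec); /* drops the references; frees heap-allocated iovecs */
    }
}
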
+
+typedef struct {
+ UWord sublist_length;
+ Eterm sublist_start;
+ Eterm sublist_end;
+
+ UWord referenced_size;
+ UWord copied_size;
+
+ UWord iovec_len;
+} iovec_slice_t;
+
+static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
+ Eterm lookahead;
+
+ result->sublist_start = list;
+ result->sublist_length = 0;
+ result->referenced_size = 0;
+ result->copied_size = 0;
+ result->iovec_len = 0;
+
+ lookahead = result->sublist_start;
+
+ while (is_list(lookahead)) {
+ UWord byte_size;
+ Eterm binary;
+ Eterm *cell;
+
+ cell = list_val(lookahead);
+ binary = CAR(cell);
+
+ if (!is_binary(binary)) {
+ return 0;
+ }
+
+ byte_size = binary_size(binary);
+
+ if (byte_size > 0) {
+ int bit_offset, bit_size;
+ Eterm parent_binary;
+ UWord byte_offset;
+
+ int requires_copying;
+
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset,
+ bit_offset, bit_size);
+
+ (void)byte_offset;
+
+ if (bit_size != 0) {
+ return 0;
+ }
+
+ /* If we're unaligned or an on-heap binary we'll need to copy
+ * ourselves over to a temporary buffer. */
+ requires_copying = (bit_offset != 0) ||
+ thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG;
+
+ if (requires_copying) {
+ result->copied_size += byte_size;
+ } else {
+ result->referenced_size += byte_size;
+ }
+
+ result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN;
+ }
+
+ result->sublist_length += 1;
+ lookahead = CDR(cell);
+
+ if (result->sublist_length >= max_length) {
+ break;
+ }
+ }
+
+ if (!is_nil(lookahead) && !is_list(lookahead)) {
+ return 0;
+ }
+
+ result->sublist_end = lookahead;
+
+ return 1;
+}
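
A worked example of the iovec_len bookkeeping above (numbers mine; taking MAX_SYSIOVEC_IOVLEN as 2^31 - 1 for illustration): a 3 GiB binary (3 * 2^30 bytes) reserves 1 + 3221225472 / 2147483647 = 2 entries, matching the two chunks the fill loop emits later. Note that for a size that is an exact multiple of MAX_SYSIOVEC_IOVLEN the formula reserves one entry more than the fill loop produces, which would trip the iovcnt assertion in fill_iovec_with_slice; presumably such sizes do not occur in practice.
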
+
+static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
+ UWord *copy_offset, ErlNifBinary *result) {
+
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ int bit_offset, bit_size;
+ Uint byte_offset;
+
+ ASSERT(is_binary(binary));
+
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
+
+ ASSERT(bit_size == 0);
+
+ parent_header = binary_val(parent_binary);
+
+ result->size = binary_size(binary);
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ if (pb->flags & (PB_IS_WRITABLE | PB_ACTIVE_WRITER)) {
+ erts_emasculate_writable_binary(pb);
+ }
+
+ ASSERT(pb->val != NULL);
+ ASSERT(byte_offset < pb->size);
+ ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);
+
+ result->data = (unsigned char*)&pb->bytes[byte_offset];
+ result->ref_bin = (void*)pb->val;
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+
+ result->data = &((unsigned char*)&hb->data)[byte_offset];
+ result->ref_bin = NULL;
+ }
+
+ /* If this isn't an *aligned* refc binary, copy its contents to the buffer
+ * and reference that instead. */
+
+ if (result->ref_bin == NULL || bit_offset != 0) {
+ ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL);
+ ASSERT(result->size <= (copy_buffer->size - *copy_offset));
+
+ if (bit_offset == 0) {
+ sys_memcpy(&copy_buffer->data[*copy_offset],
+ result->data, result->size);
+ } else {
+ erts_copy_bits(result->data, bit_offset, 1,
+ (byte*)&copy_buffer->data[*copy_offset], 0, 1,
+ result->size * 8);
+ }
+
+ result->data = &copy_buffer->data[*copy_offset];
+ result->ref_bin = copy_buffer->ref_bin;
+
+ *copy_offset += result->size;
+ }
+}
+
+static int fill_iovec_with_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec *iovec) {
+ ErlNifBinary copy_buffer = {0};
+ UWord copy_offset, iovec_idx;
+ Eterm sublist_iterator;
+
+ /* Set up a common refc binary for all on-heap and unaligned binaries. */
+ if (slice->copied_size > 0) {
+ if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) {
+ return 0;
+ }
+
+ ASSERT(copy_buffer.ref_bin != NULL);
+ }
+
+ sublist_iterator = slice->sublist_start;
+ copy_offset = 0;
+ iovec_idx = 0;
+
+ while (sublist_iterator != slice->sublist_end) {
+ ErlNifBinary raw_data;
+ Eterm *cell;
+
+ cell = list_val(sublist_iterator);
+ marshal_iovec_binary(CAR(cell), &copy_buffer, &copy_offset, &raw_data);
+
+ while (raw_data.size > 0) {
+ UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
+
+ ASSERT(iovec_idx < iovec->iovcnt);
+ ASSERT(raw_data.ref_bin != NULL);
+
+ iovec->iov[iovec_idx].iov_base = raw_data.data;
+ iovec->iov[iovec_idx].iov_len = chunk_len;
+
+ iovec->ref_bins[iovec_idx] = raw_data.ref_bin;
+
+ raw_data.data += chunk_len;
+ raw_data.size -= chunk_len;
+
+ iovec_idx += 1;
+ }
+
+ sublist_iterator = CDR(cell);
+ }
+
+ ASSERT(iovec_idx == iovec->iovcnt);
+
+ if (env == NULL) {
+ int i;
+ for (i = 0; i < iovec->iovcnt; i++) {
+ Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
+ erts_refc_inc(&refc_binary->intern.refc, 1);
+ }
+
+ if (slice->copied_size > 0) {
+ /* Transfer ownership to the iovec; we've taken references to it in
+ * the above loop. */
+ enif_release_binary(&copy_buffer);
+ }
+ } else {
+ if (slice->copied_size > 0) {
+ /* Attach the binary to our environment and let the next minor GC
+ * get rid of it. This is slightly faster than using the tmp object
+ * list since it avoids off-heap allocations. */
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin);
+ }
+ }
+
+ return 1;
+}
+
+static int create_iovec_from_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec **result) {
+ ErlNifIOVec *iovec = *result;
+
+ if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
+ iovec->iov = iovec->small_iov;
+ iovec->ref_bins = iovec->small_ref_bin;
+ iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
+ } else {
+ UWord iov_offset, binv_offset, alloc_size;
+ char *alloc_base;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += slice->iovec_len * sizeof(Binary*);
+
+ /* When the user passes an environment, we attach the iovec to it so
+ * the user won't have to bother managing it (similar to
+ * enif_inspect_binary). It'll disappear once the environment is
+ * cleaned up. */
+ if (env != NULL) {
+ alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor);
+ } else {
+ alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size);
+ }
+
+ iovec = (ErlNifIOVec*)alloc_base;
+ iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
+ iovec->ref_bins = (void**)(alloc_base + binv_offset);
+ iovec->flags = 0;
+ }
+
+ iovec->size = slice->referenced_size + slice->copied_size;
+ iovec->iovcnt = slice->iovec_len;
+
+ if(!fill_iovec_with_slice(env, slice, iovec)) {
+ if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ erts_free(ERTS_ALC_T_NIF, iovec);
+ }
+
+ return 0;
+ }
+
+ *result = iovec;
+
+ return 1;
+}
+
+int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
+ ERL_NIF_TERM list, ERL_NIF_TERM *tail,
+ ErlNifIOVec **iov) {
+ iovec_slice_t slice;
+
+ if(!examine_iovec_term(list, max_elements, &slice)) {
+ return 0;
+ } else if(!create_iovec_from_slice(env, &slice, iov)) {
+ return 0;
+ }
+
+ (*tail) = slice.sublist_end;
+
+ return 1;
+}
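
A hypothetical NIF using enif_inspect_iovec (editorial sketch; writev_nif and the fd choice are invented, and SysIOVec is assumed layout-compatible with struct iovec as on Unix):

#include <sys/uio.h>

static ERL_NIF_TERM writev_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifIOVec vec, *iovec = &vec;
    ERL_NIF_TERM tail;
    ssize_t n;

    if (!enif_inspect_iovec(env, 64, argv[0], &tail, &iovec))
        return enif_make_badarg(env);

    n = writev(1 /* stdout, illustration only */,
               (const struct iovec*)iovec->iov, iovec->iovcnt);

    /* The iovec is attached to 'env' here, so no explicit free is needed. */
    return enif_make_tuple2(env, enif_make_int64(env, (ErlNifSInt64)n), tail);
}
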
+
+/* Enqueue the I/O vector into the queue, skipping the first 'skip' bytes. */
+int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
+{
+ if(skip <= iov->size) {
+ return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
+ }
+
+ return 0;
+}
+
+int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
+{
+ ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
+ Binary *ref_bin = (Binary*)bin->ref_bin;
+ int res;
+ vec.iov = vec.small_iov;
+ vec.ref_bins = vec.small_ref_bin;
+ vec.iov[0].iov_base = bin->data;
+ vec.iov[0].iov_len = bin->size;
+ ((Binary**)(vec.ref_bins))[0] = ref_bin;
+
+ res = enif_ioq_enqv(q, &vec, skip);
+ enif_release_binary(bin);
+ return res;
+}
+
+size_t enif_ioq_size(ErlNifIOQueue *q)
+{
+ return erts_ioq_size(q);
+}
+
+int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
+{
+ if (erts_ioq_deq(q, elems) == -1)
+ return 0;
+ if (size)
+ *size = erts_ioq_size(q);
+ return 1;
+}
+
+int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) {
+ SysIOVec *iov_entry;
+ Binary *ref_bin;
+
+ if (q->size == 0) {
+ return 0;
+ }
+
+ ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail);
+
+ ref_bin = &q->b_head[0]->nif;
+ iov_entry = &q->v_head[0];
+
+ if (size != NULL) {
+ *size = iov_entry->iov_len;
+ }
+
+ if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) {
+ ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE);
+
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->next = MSO(env->proc).first;
+ pb->val = ref_bin;
+ pb->flags = 0;
+
+ ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes);
+ ASSERT(iov_entry->iov_len <= ref_bin->orig_size);
+
+ pb->bytes = (byte*)iov_entry->iov_base;
+ pb->size = iov_entry->iov_len;
+
+ MSO(env->proc).first = (struct erl_off_heap_header*) pb;
+ OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
+
+ erts_refc_inc(&ref_bin->intern.refc, 2);
+ *bin_term = make_binary(pb);
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len));
+
+ hb->thing_word = header_heap_bin(iov_entry->iov_len);
+ hb->size = iov_entry->iov_len;
+
+ sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len);
+ *bin_term = make_binary(hb);
+ }
+
+ return 1;
+}
+
+SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
+{
+ return erts_ioq_peekq(q, iovlen);
+}
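
A sketch tying the queue API above together (editorial; ioq_demo and the element counts are invented):

static void ioq_demo(ErlNifEnv* env, ERL_NIF_TERM iolist)
{
    ErlNifIOQueue* q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
    ErlNifIOVec vec, *iovec = &vec;
    ERL_NIF_TERM tail;
    int iovlen;

    if (q && enif_inspect_iovec(env, 64, iolist, &tail, &iovec)
          && enif_ioq_enqv(q, iovec, 0)) {
        SysIOVec* sysiov = enif_ioq_peek(q, &iovlen); /* inspect, don't consume */
        (void)sysiov;
        enif_ioq_deq(q, enif_ioq_size(q), NULL);      /* then drop all bytes */
    }
    if (q)
        enif_ioq_destroy(q);
}
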
+
/***************************************************************************
** load_nif/2 **
***************************************************************************/
-static BeamInstr** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
+static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
{
int n = (int) mod_code->num_functions;
int j;
for (j = 0; j < n; ++j) {
- BeamInstr* code_ptr = (BeamInstr*) mod_code->functions[j];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (f_atom == ((Eterm) code_ptr[3])
- && arity == ((unsigned) code_ptr[4])) {
-
- return (BeamInstr**) &mod_code->functions[j];
+ ErtsCodeInfo* ci = mod_code->functions[j];
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
+ if (f_atom == ci->mfa.function
+ && arity == ci->mfa.arity) {
+ return mod_code->functions+j;
}
}
return NULL;
@@ -3029,16 +3841,26 @@ static Eterm mkatom(const char *str)
return am_atom_put(str, sys_strlen(str));
}
-static struct tainted_module_t
+struct tainted_module_t
{
struct tainted_module_t* next;
Eterm module_atom;
-}*first_tainted_module = NULL;
+};
+
+erts_atomic_t first_taint; /* struct tainted_module_t* */
-static void add_taint(Eterm mod_atom)
+void erts_add_taint(Eterm mod_atom)
{
- struct tainted_module_t* t;
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ extern erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
+#endif
+ struct tainted_module_t *first, *t;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)
+ || erts_thr_progress_is_blocking());
+
+ first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for (t=first ; t; t=t->next) {
if (t->module_atom == mod_atom) {
return;
}
@@ -3046,38 +3868,42 @@ static void add_taint(Eterm mod_atom)
t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t));
if (t != NULL) {
t->module_atom = mod_atom;
- t->next = first_tainted_module;
- first_tainted_module = t;
+ t->next = first;
+ erts_atomic_set_nob(&first_taint, (erts_aint_t)t);
}
}
Eterm erts_nif_taints(Process* p)
{
- struct tainted_module_t* t;
+ struct tainted_module_t *first, *t;
unsigned cnt = 0;
Eterm list = NIL;
Eterm* hp;
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+
+ first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for (t=first ; t!=NULL; t=t->next) {
cnt++;
}
hp = HAlloc(p,cnt*2);
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+ for (t=first ; t!=NULL; t=t->next) {
list = CONS(hp, t->module_atom, list);
hp += 2;
}
return list;
}
-void erts_print_nif_taints(int to, void* to_arg)
+void erts_print_nif_taints(fmtfn_t to, void* to_arg)
{
- struct tainted_module_t* t;
+ struct tainted_module_t *t;
const char* delim = "";
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+
+ t = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for ( ; t; t = t->next) {
const Atom* atom = atom_tab(atom_val(t->module_atom));
- erts_print(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
+ erts_cbprintf(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
delim = ",";
}
- erts_print(to,to_arg,"\n");
+ erts_cbprintf(to,to_arg,"\n");
}
@@ -3110,39 +3936,67 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+#define AT_LEAST_VERSION(E,MAJ,MIN) \
+ (((E)->major * 0x100 + (E)->minor) >= ((MAJ) * 0x100 + (MIN)))
+
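
For example (arithmetic mine): an entry with major 2 and minor 11 encodes to 2 * 0x100 + 11 = 0x20B = 523, so AT_LEAST_VERSION(entry, 2, 7) compares 523 >= 0x207 = 519 and holds, while AT_LEAST_VERSION(entry, 2, 12) compares 523 >= 0x20C = 524 and fails.
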
/*
- * The function below is for looping through ErlNifFunc arrays, helping
- * provide backwards compatibility across the version 2.7 change that added
- * the "flags" field to ErlNifFunc.
+ * Allocate erl_module_nif and make a _modern_ copy of the lib entry.
*/
-static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+static struct erl_module_nif* create_lib(const ErlNifEntry* src)
{
- ASSERT(incrp);
- if (!*incrp) {
- if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- *incrp = sizeof(ErlNifFunc);
- else {
- /*
- * ErlNifFuncV1 below is what ErlNifFunc was before the
- * addition of the flags field for 2.7, and is needed to handle
- * backward compatibility.
- */
- typedef struct {
- const char* name;
- unsigned arity;
- ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
- }ErlNifFuncV1;
- *incrp = sizeof(ErlNifFuncV1);
- }
+ struct erl_module_nif* lib;
+ ErlNifEntry* dst;
+ Uint bytes = offsetof(struct erl_module_nif, _funcs_copy_);
+
+ if (!AT_LEAST_VERSION(src, 2, 7))
+ bytes += src->num_of_funcs * sizeof(ErlNifFunc);
+
+ lib = erts_alloc(ERTS_ALC_T_NIF, bytes);
+ dst = &lib->entry;
+
+ sys_memcpy(dst, src, offsetof(ErlNifEntry, vm_variant));
+
+ if (AT_LEAST_VERSION(src, 2, 1)) {
+ dst->vm_variant = src->vm_variant;
+ } else {
+ dst->vm_variant = "beam.vanilla";
}
- return (ErlNifFunc*) ((char*)func + *incrp);
-}
+ if (AT_LEAST_VERSION(src, 2, 7)) {
+ dst->options = src->options;
+ } else {
+ /*
+ * Make a modern copy of the ErlNifFunc array
+ */
+ struct ErlNifFunc_V1 {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }*src_funcs = (struct ErlNifFunc_V1*) src->funcs;
+ int i;
+ for (i = 0; i < src->num_of_funcs; ++i) {
+ sys_memcpy(&lib->_funcs_copy_[i], &src_funcs[i], sizeof(*src_funcs));
+ lib->_funcs_copy_[i].flags = 0;
+ }
+ dst->funcs = lib->_funcs_copy_;
+ dst->options = 0;
+ }
+ if (AT_LEAST_VERSION(src, 2, 12)) {
+ dst->sizeof_ErlNifResourceTypeInit = src->sizeof_ErlNifResourceTypeInit;
+ } else {
+ dst->sizeof_ErlNifResourceTypeInit = 0;
+ }
+ if (AT_LEAST_VERSION(src, 2, 14)) {
+ dst->min_erts = src->min_erts;
+ } else {
+ dst->min_erts = "erts-?";
+ }
+ return lib;
+}
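
The trailing _funcs_copy_ storage used by create_lib is declared elsewhere in this file; a hedged sketch of the assumed shape (field order and companions invented):

/* Assumed layout; the real struct erl_module_nif lives earlier in erl_nif.c.
 * '_funcs_copy_' must be the last member so create_lib() can over-allocate
 * it for pre-2.7 entries that lack the 'flags' field. */
struct erl_module_nif_sketch {
    void* priv_data;
    void* handle;               /* from erts_sys_ddll_open() */
    ErlNifEntry entry;          /* modernized copy written by create_lib() */
    erts_refc_t rt_cnt;         /* resource types created by this lib */
    erts_refc_t rt_dtor_cnt;    /* ... of which have destructors */
    Module* mod;
    ErlNifFunc _funcs_copy_[1]; /* only populated for entry versions < 2.7 */
};
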
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
- static const char reload[] = "reload";
static const char upgrade[] = "upgrade";
char* lib_name = NULL;
void* handle = NULL;
@@ -3154,15 +4008,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
Eterm mod_atom;
const Atom* mod_atomp;
Eterm f_atom;
- BeamInstr* caller;
+ ErtsCodeMFA* caller;
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
+ int taint = 1;
struct erl_module_nif* lib = NULL;
- int reload_warning = 0;
struct erl_module_instance* this_mi;
struct erl_module_instance* prev_mi;
+ if (BIF_P->flags & F_HIPE_MODE) {
+ ret = load_nif_error(BIF_P, "notsup", "Calling load_nif from HiPE compiled "
+ "modules not supported");
+ BIF_RET(ret);
+ }
+
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
/* Do not convert the lib name to utf-16le yet, do that in win32 specific code */
@@ -3183,26 +4043,31 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/* Block system (is this the right place to do it?) */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
- ASSERT(BIF_P->current[0] == am_erlang
- && BIF_P->current[1] == am_load_nif
- && BIF_P->current[2] == 2);
+ ASSERT(BIF_P->current->module == am_erlang
+ && BIF_P->current->function == am_load_nif
+ && BIF_P->current->arity == 2);
caller = find_function_from_pc(BIF_P->cp);
ASSERT(caller != NULL);
- mod_atom = caller[0];
+ mod_atom = caller->module;
ASSERT(is_atom(mod_atom));
module_p = erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(module_p != NULL);
mod_atomp = atom_tab(atom_val(mod_atom));
- init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
- if (init_func != NULL)
- handle = init_func;
-
+ {
+ ErtsStaticNifEntry* sne;
+ sne = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
+ if (sne != NULL) {
+ init_func = sne->nif_init;
+ handle = init_func;
+ taint = sne->taint;
+ }
+ }
this_mi = &module_p->curr;
prev_mi = &module_p->old;
if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
@@ -3219,8 +4084,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
this_mi = module_p->on_load;
}
- if (init_func == NULL &&
- (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ if (this_mi->nif != NULL) {
+ ret = load_nif_error(BIF_P,"reload","NIF library already loaded"
+ " (reload disallowed since OTP 20).");
+ }
+ else if (init_func == NULL &&
+ (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str);
@@ -3235,20 +4104,26 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" function: '%s'", errdesc.str);
}
- else if ((add_taint(mod_atom),
+ else if ((taint ? erts_add_taint(mod_atom) : 0,
(entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
}
+ else if (entry->major > ERL_NIF_MAJOR_VERSION
+ || (entry->major == ERL_NIF_MAJOR_VERSION
+ && entry->minor > ERL_NIF_MINOR_VERSION)) {
+ char* fmt = "That '%T' NIF library needs %s or newer. Either try to"
+ " recompile the NIF lib or use a newer erts runtime.";
+ ret = load_nif_error(BIF_P, bad_lib, fmt, mod_atom, entry->min_erts);
+ }
else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
- || (ERL_NIF_MAJOR_VERSION < entry->major
- || (ERL_NIF_MAJOR_VERSION == entry->major
- && ERL_NIF_MINOR_VERSION < entry->minor))
|| (entry->major==2 && entry->minor == 5)) { /* experimental maps */
- ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
- entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
+ char* fmt = "That old NIF library (%d.%d) is not compatible with this "
+ "erts runtime (%d.%d). Try recompile the NIF lib.";
+ ret = load_nif_error(BIF_P, bad_lib, fmt, entry->major, entry->minor,
+ ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
- else if (entry->minor >= 1
+ else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
ret = load_nif_error(BIF_P, bad_lib, "Library (%s) not compiled for "
"this vm variant (%s).",
@@ -3259,20 +4134,25 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" match calling module '%T'", entry->name, mod_atom);
}
else {
- /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
- int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
- BeamInstr** code_pp;
+ lib = create_lib(entry);
+ entry = &lib->entry; /* Use a guaranteed modern lib entry from now on */
+
+ lib->handle = handle;
+ erts_refc_init(&lib->rt_cnt, 0);
+ erts_refc_init(&lib->rt_dtor_cnt, 0);
+ ASSERT(opened_rt_list == NULL);
+ lib->mod = module_p;
+
+ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
+ ErtsCodeInfo** ci_pp;
+ ErlNifFunc* f = &entry->funcs[i];
+
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
- || (code_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
+ || (ci_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
- else if (maybe_dirty_nifs && f->flags) {
+ else if (f->flags) {
/*
* If the flags field is non-zero and this emulator was
* built with dirty scheduler support, check that the flags
@@ -3280,20 +4160,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* dirty scheduler support, treat a non-zero flags field as
* a load error.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
f->flags, mod_atom, f->name, f->arity);
-#else
- ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
- mod_atom, f->name, f->arity);
-#endif
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (code_pp[1] - code_pp[0] < (5+4))
-#else
- else if (code_pp[1] - code_pp[0] < (5+3))
-#endif
+ else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
+ < BEAM_NIF_MIN_FUNC_SZ)
{
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
" in module (%T:%s/%u too small)",
@@ -3301,7 +4173,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
mod_atom, f->name, f->arity);*/
- f = next_func(entry, &incr, f);
}
}
@@ -3309,132 +4180,69 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
- /* Call load, reload or upgrade:
+ /* Call load or upgrade:
*/
-
- lib = erts_alloc(ERTS_ALC_T_NIF, sizeof(struct erl_module_nif));
- lib->handle = handle;
- lib->entry = entry;
- erts_refc_init(&lib->rt_cnt, 0);
- erts_refc_init(&lib->rt_dtor_cnt, 0);
- ASSERT(opened_rt_list == NULL);
- lib->mod = module_p;
env.mod_nif = lib;
- if (this_mi->nif != NULL) { /*************** Reload ******************/
- /*
- * Repeated load_nif calls from same Erlang module instance ("reload")
- * is deprecated and was only ment as a development feature not to
- * be used in production systems. (See warning below)
- */
- int k, old_incr = 0;
- ErlNifFunc* old_func;
- lib->priv_data = this_mi->nif->priv_data;
-
- ASSERT(this_mi->nif->entry != NULL);
- if (entry->reload == NULL) {
- ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
- goto error;
- }
- /* Check that no NIF is removed */
- old_func = this_mi->nif->entry->funcs;
- for (k=0; k < this_mi->nif->entry->num_of_funcs; k++) {
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
- for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == f->arity
- && sys_strcmp(old_func->name, f->name) == 0) {
- break;
- }
- f = next_func(entry, &incr, f);
- }
- if (i == entry->num_of_funcs) {
- ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
- "function %T:%s/%u\r\n", mod_atom,
- old_func->name, old_func->arity);
- goto error;
- }
- old_func = next_func(this_mi->nif->entry, &old_incr, old_func);
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
- }
- else {
- commit_opened_resource_types(lib);
- this_mi->nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(this_mi->nif);
- reload_warning = 1;
- }
+
+ lib->priv_data = NULL;
+ if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
+ void* prev_old_data = prev_mi->nif->priv_data;
+ if (entry->upgrade == NULL) {
+ ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ goto error;
+ }
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ prev_mi->nif->priv_data = prev_old_data;
+ ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful (%d).", veto);
+ }
}
- else {
- lib->priv_data = NULL;
- if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
- void* prev_old_data = prev_mi->nif->priv_data;
- if (entry->upgrade == NULL) {
- ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
- goto error;
- }
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- prev_mi->nif->priv_data = prev_old_data;
- ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
- }
- else
- commit_opened_resource_types(lib);
- }
- else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
- erts_post_nif(&env);
- if (veto) {
- ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful.");
- }
- else
- commit_opened_resource_types(lib);
- }
+ else if (entry->load != NULL) { /********* Initial load ***********/
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful (%d).", veto);
+ }
}
if (ret == am_ok) {
+ commit_opened_resource_types(lib);
+
/*
** Everything ok, patch the beam code with op_call_nif
*/
- int incr = 0;
- ErlNifFunc* f = entry->funcs;
-
this_mi->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
- BeamInstr* code_ptr;
+ ErlNifFunc* f = &entry->funcs[i];
+ ErtsCodeInfo* ci;
+ BeamInstr *code_ptr;
+
erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
+ code_ptr = erts_codeinfo_to_code(ci);
- if (code_ptr[1] == 0) {
- code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
+ if (ci->u.gen_bp == NULL) {
+ code_ptr[0] = BeamOpCodeAddr(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- GenericBp* g = (GenericBp *) code_ptr[1];
- ASSERT(code_ptr[5+0] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ GenericBp* g = ci->u.gen_bp;
+ ASSERT(BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint));
+ g->orig_instr = BeamOpCodeAddr(op_call_nif);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
- && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
- code_ptr[5+3] = (BeamInstr) f->fptr;
- code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
- (BeamInstr) schedule_dirty_io_nif :
- (BeamInstr) schedule_dirty_cpu_nif;
+ if (f->flags) {
+ code_ptr[3] = (BeamInstr) f->fptr;
+ code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) static_schedule_dirty_io_nif :
+ (BeamInstr) static_schedule_dirty_cpu_nif;
}
else
-#endif
- code_ptr[5+1] = (BeamInstr) f->fptr;
- code_ptr[5+2] = (BeamInstr) lib;
- f = next_func(entry, &incr, f);
+ code_ptr[1] = (BeamInstr) f->fptr;
+ code_ptr[2] = (BeamInstr) lib;
}
}
else {
@@ -3450,19 +4258,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
- if (reload_warning) {
- erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Repeated calls to erlang:load_nif from module '%T'.\n\n"
- "The NIF reload mechanism is deprecated and must not "
- "be used in production systems.\n", mod_atom);
- erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
- }
BIF_RET(ret);
}
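
Hypothetical callbacks matching the load and upgrade paths above (names invented; a nonzero return is the "veto" that makes load_nif/2 fail):

static int my_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    *priv_data = enif_alloc(sizeof(int)); /* fresh state on first load */
    return *priv_data ? 0 : 1;
}

static int my_upgrade(ErlNifEnv* env, void** priv_data,
                      void** old_priv_data, ERL_NIF_TERM load_info)
{
    /* One common pattern (assumed): adopt the old instance's state. */
    *priv_data = *old_priv_data;
    return 0;
}
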
@@ -3472,7 +4272,7 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
@@ -3512,7 +4312,8 @@ erts_unload_nif(struct erl_module_nif* lib)
void erl_nif_init()
{
- ERTS_CT_ASSERT((offsetof(ErlNifResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
+ ERTS_CT_ASSERT((offsetof(ErtsResource,data) % 8)
+ == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
resource_type_list.next = &resource_type_list;
resource_type_list.prev = &resource_type_list;
@@ -3526,8 +4327,12 @@ void erl_nif_init()
int erts_nif_get_funcs(struct erl_module_nif* mod,
ErlNifFunc **funcs)
{
- *funcs = mod->entry->funcs;
- return mod->entry->num_of_funcs;
+ *funcs = mod->entry.funcs;
+ return mod->entry.num_of_funcs;
+}
+
+Module *erts_nif_get_module(struct erl_module_nif *nif_mod) {
+ return nif_mod->mod;
}
Eterm erts_nif_call_function(Process *p, Process *tracee,
@@ -3538,18 +4343,18 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#ifdef DEBUG
/* Verify that function is part of this module */
int i;
- for (i = 0; i < mod->entry->num_of_funcs; i++)
- if (fun == &(mod->entry->funcs[i]))
+ for (i = 0; i < mod->entry.num_of_funcs; i++)
+ if (fun == &(mod->entry.funcs[i]))
break;
- ASSERT(i < mod->entry->num_of_funcs);
+ ASSERT(i < mod->entry.num_of_funcs);
if (p)
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
- || erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_thr_progress_is_blocking());
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,
- except that any heap fragment created in the nif will be
- discarded without checking if anything in it is live.
+ except that any heap consumed by the nif will be
+ released without checking if anything in it is live.
This is because we cannot do a GC here as we don't know
the number of live registers that have to be preserved.
This means that any heap part of the returned term may
@@ -3563,6 +4368,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
clear_offheap(&MSO(p));
erts_pre_nif(&env, p, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&env, argc, argv);
if (env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3585,6 +4393,9 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
so we create a phony one. */
struct enif_msg_environment_t msg_env;
pre_nif_noproc(&msg_env, mod, tracee);
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ msg_env.env.dbg_disable_assert_in_env = 1;
+#endif
nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
if (msg_env.env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -3609,34 +4420,31 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size);
struct readonly_check_t
{
- struct enif_tmp_obj_t hdr;
unsigned char* ptr;
unsigned size;
unsigned checksum;
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
- ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- struct readonly_check_t* obj = erts_alloc(allocator,
- sizeof(struct readonly_check_t));
- obj->hdr.allocator = allocator;
- obj->hdr.next = env->tmp_obj_list;
- env->tmp_obj_list = &obj->hdr;
- obj->hdr.dtor = &readonly_check_dtor;
+ struct readonly_check_t* obj;
+
+ obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t),
+ &readonly_check_dtor);
+
obj->ptr = ptr;
obj->size = sz;
- obj->checksum = calc_checksum(ptr, sz);
+ obj->checksum = calc_checksum(ptr, sz);
}
-static void readonly_check_dtor(struct enif_tmp_obj_t* o)
+static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj)
{
- struct readonly_check_t* obj = (struct readonly_check_t*) o;
- unsigned chksum = calc_checksum(obj->ptr, obj->size);
- if (chksum != obj->checksum) {
+ struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1];
+ unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size);
+ if (chksum != ro_check->checksum) {
fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
- " %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
+ " %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum);
abort();
}
- erts_free(obj->hdr.allocator, obj);
+ erts_free(tmp_obj->allocator, tmp_obj);
}
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
@@ -3649,6 +4457,55 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size)
#endif /* READONLY_CHECK */
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+static void dbg_assert_in_env(ErlNifEnv* env, Eterm term,
+ int nr, const char* type, const char* func)
+{
+ Uint saved_used_size;
+ Eterm* real_htop;
+
+ if (is_immed(term)
+ || (is_non_value(term) && env->exception_thrown)
+ || erts_is_literal(term, ptr_val(term)))
+ return;
+
+ if (env->dbg_disable_assert_in_env) {
+ /*
+ * Trace NIFs may cheat, as the terms they build are discarded after return.
+ * ToDo: Check if 'term' is part of argv[].
+ */
+ return;
+ }
+
+ if (env->heap_frag) {
+ ASSERT(env->heap_frag == MBUF(env->proc));
+ ASSERT(env->hp >= env->heap_frag->mem);
+ ASSERT(env->hp <= env->heap_frag->mem + env->heap_frag->alloc_size);
+ saved_used_size = env->heap_frag->used_size;
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ real_htop = NULL;
+ }
+ else {
+ real_htop = env->hp;
+ }
+ if (!erts_dbg_within_proc(ptr_val(term), env->proc, real_htop)) {
+ fprintf(stderr, "\r\nFAILED ASSERTION in %s:\r\n", func);
+ if (nr) {
+ fprintf(stderr, "Term #%d of the %s is not from same ErlNifEnv.",
+ nr, type);
+ }
+ else {
+ fprintf(stderr, "The %s is not from the same ErlNifEnv.", type);
+ }
+ fprintf(stderr, "\r\nABORTING\r\n");
+ abort();
+ }
+ if (env->heap_frag) {
+ env->heap_frag->used_size = saved_used_size;
+ }
+}
+#endif
+
#ifdef HAVE_USE_DTRACE
#define MESSAGE_BUFSIZ 1024
@@ -3662,7 +4519,7 @@ static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term,
str_bin.size > bufsiz) {
*ptr = NULL;
} else {
- memcpy(buf, (char *) str_bin.data, str_bin.size);
+ sys_memcpy(buf, (char *) str_bin.data, str_bin.size);
buf[str_bin.size] = '\0';
*ptr = buf;
}
@@ -3679,7 +4536,7 @@ ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc,
message_bin.size > MESSAGE_BUFSIZ) {
return am_badarg;
}
- memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
+ sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
messagebuf[message_bin.size] = '\0';
DTRACE1(user_trace_s1, messagebuf);
return am_true;
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 494971e118..4c09496ef1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -49,10 +49,15 @@
** 2.8: 18.0 add enif_has_pending_exception
** 2.9: 18.2 enif_getenv
** 2.10: Time API
-** 2.11: 19.0 enif_snprintf
+** 2.11: 19.0 enif_snprintf
+** 2.12: 20.0 add enif_select, enif_open_resource_type_x
+** 2.13: 20.1 add enif_ioq
+** 2.14: 21.0 add enif_ioq_peek_head, enif_(mutex|cond|rwlock|thread)_name
+** enif_vfprintf, enif_vsnprintf, enif_make_map_from_arrays
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 11
+#define ERL_NIF_MINOR_VERSION 14
+#define ERL_NIF_MIN_ERTS_VERSION "erts-10.0 (OTP-21)"
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -67,6 +72,8 @@
#define ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD 2
#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
#ifdef __cplusplus
extern "C" {
@@ -116,12 +123,19 @@ typedef struct enif_entry_t
int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
+
+ /* Added in 2.1 */
const char* vm_variant;
- unsigned options;
-}ErlNifEntry;
-/* Field bits for ErlNifEntry options */
-#define ERL_NIF_DIRTY_NIF_OPTION 1
+ /* Added in 2.7 */
+ unsigned options; /* Unused. Can be set to 0 or 1 (dirty sched config) */
+
+ /* Added in 2.12 */
+ size_t sizeof_ErlNifResourceTypeInit;
+
+ /* Added in 2.14 */
+ const char* min_erts;
+}ErlNifEntry;
typedef struct
@@ -130,12 +144,23 @@ typedef struct
unsigned char* data;
/* Internals (avert your eyes) */
- ERL_NIF_TERM bin_term;
void* ref_bin;
+ /* for future additions to be ABI compatible (same struct size) */
+ void* __spare__[2];
}ErlNifBinary;
-typedef struct enif_resource_type_t ErlNifResourceType;
-typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef void* ErlNifEvent; /* FIXME: Use 'HANDLE' somehow without breaking existing source */
+#else
+typedef int ErlNifEvent;
+#endif
+
+/* Return bits from enif_select: */
+#define ERL_NIF_SELECT_STOP_CALLED (1 << 0)
+#define ERL_NIF_SELECT_STOP_SCHEDULED (1 << 1)
+#define ERL_NIF_SELECT_INVALID_EVENT (1 << 2)
+#define ERL_NIF_SELECT_FAILED (1 << 3)
+
typedef enum
{
ERL_NIF_RT_CREATE = 1,
@@ -157,6 +182,19 @@ typedef struct
ERL_NIF_TERM port_id; /* internal, may change */
}ErlNifPort;
+typedef ErlDrvMonitor ErlNifMonitor;
+
+typedef struct enif_resource_type_t ErlNifResourceType;
+typedef void ErlNifResourceDtor(ErlNifEnv*, void*);
+typedef void ErlNifResourceStop(ErlNifEnv*, void*, ErlNifEvent, int is_direct_call);
+typedef void ErlNifResourceDown(ErlNifEnv*, void*, ErlNifPid*, ErlNifMonitor*);
+
+typedef struct {
+ ErlNifResourceDtor* dtor;
+ ErlNifResourceStop* stop; /* at ERL_NIF_SELECT_STOP event */
+ ErlNifResourceDown* down; /* enif_monitor_process */
+} ErlNifResourceTypeInit;
+
typedef ErlDrvSysInfo ErlNifSysInfo;
typedef struct ErlDrvTid_ *ErlNifTid;
@@ -209,6 +247,33 @@ typedef enum {
ERL_NIF_BIN2TERM_SAFE = 0x20000000
} ErlNifBinaryToTerm;
+typedef enum {
+ ERL_NIF_INTERNAL_HASH = 1,
+ ERL_NIF_PHASH2 = 2
+} ErlNifHash;
+
+#define ERL_NIF_IOVEC_SIZE 16
+
+typedef struct erl_nif_io_vec {
+ int iovcnt; /* number of elements in iov */
+ size_t size; /* total size in bytes */
+ SysIOVec *iov;
+
+ /* internals (avert your eyes) */
+ void **ref_bins; /* Binary[] */
+ int flags;
+
+ /* Used when stack allocating the io vec */
+ SysIOVec small_iov[ERL_NIF_IOVEC_SIZE];
+ void *small_ref_bin[ERL_NIF_IOVEC_SIZE];
+} ErlNifIOVec;
+
+typedef struct erts_io_queue ErlNifIOQueue;
+
+typedef enum {
+ ERL_NIF_IOQ_NORMAL = 1
+} ErlNifIOQueueOpts;
+
/*
* Return values from enif_thread_type(). Negative values
* reserved for specific types of non-scheduler threads.
@@ -266,8 +331,6 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
#endif
-#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
-
#ifdef __cplusplus
}
# define ERL_NIF_INIT_PROLOGUE extern "C" {
@@ -293,7 +356,9 @@ ERL_NIF_INIT_DECL(NAME) \
FUNCS, \
LOAD, RELOAD, UPGRADE, UNLOAD, \
ERL_NIF_VM_VARIANT, \
- ERL_NIF_ENTRY_OPTIONS \
+ 1, \
+ sizeof(ErlNifResourceTypeInit), \
+ ERL_NIF_MIN_ERTS_VERSION \
}; \
ERL_NIF_INIT_BODY; \
return &entry; \
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 9a8f216773..81f64f2390 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -92,7 +92,7 @@ ERL_NIF_API_FUNC_DECL(int,enif_thread_join,(ErlNifTid, void **respp));
ERL_NIF_API_FUNC_DECL(void*,enif_realloc,(void* ptr, size_t size));
ERL_NIF_API_FUNC_DECL(void,enif_system_info,(ErlNifSysInfo *sip, size_t si_size));
-ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(void/* FILE* */ *filep, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_fprintf,(FILE* filep, const char *format, ...));
ERL_NIF_API_FUNC_DECL(int,enif_inspect_iolist_as_binary,(ErlNifEnv*, ERL_NIF_TERM term, ErlNifBinary* bin));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_sub_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, size_t pos, size_t size));
ERL_NIF_API_FUNC_DECL(int,enif_get_string,(ErlNifEnv*, ERL_NIF_TERM list, char* buf, unsigned len, ErlNifCharEncoding));
@@ -175,6 +175,40 @@ ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsign
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
ERL_NIF_API_FUNC_DECL(int,enif_thread_type,(void));
ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
+ERL_NIF_API_FUNC_DECL(int,enif_select,(ErlNifEnv* env, ErlNifEvent e, enum ErlNifSelectFlags flags, void* obj, const ErlNifPid* pid, ERL_NIF_TERM ref));
+ERL_NIF_API_FUNC_DECL(ErlNifResourceType*,enif_open_resource_type_x,(ErlNifEnv*, const char* name_str, const ErlNifResourceTypeInit*, ErlNifResourceFlags flags, ErlNifResourceFlags* tried));
+ERL_NIF_API_FUNC_DECL(int, enif_monitor_process,(ErlNifEnv*,void* obj,const ErlNifPid*,ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_demonitor_process,(ErlNifEnv*,void* obj,const ErlNifMonitor *monitor));
+ERL_NIF_API_FUNC_DECL(int, enif_compare_monitors,(const ErlNifMonitor*,const ErlNifMonitor*));
+ERL_NIF_API_FUNC_DECL(ErlNifUInt64,enif_hash,(ErlNifHash type, ERL_NIF_TERM term, ErlNifUInt64 salt));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_pid, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPid *pid));
+ERL_NIF_API_FUNC_DECL(int, enif_whereis_port, (ErlNifEnv *env, ERL_NIF_TERM name, ErlNifPort *port));
+
+ERL_NIF_API_FUNC_DECL(ErlNifIOQueue *,enif_ioq_create,(ErlNifIOQueueOpts opts));
+ERL_NIF_API_FUNC_DECL(void,enif_ioq_destroy,(ErlNifIOQueue *q));
+
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enq_binary,(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_enqv,(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip));
+
+ERL_NIF_API_FUNC_DECL(size_t,enif_ioq_size,(ErlNifIOQueue *q));
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_deq,(ErlNifIOQueue *q, size_t count, size_t *size));
+
+ERL_NIF_API_FUNC_DECL(SysIOVec*,enif_ioq_peek,(ErlNifIOQueue *q, int *iovlen));
+
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_iovec,(ErlNifEnv *env, size_t max_length, ERL_NIF_TERM iovec_term, ERL_NIF_TERM *tail, ErlNifIOVec **iovec));
+ERL_NIF_API_FUNC_DECL(void,enif_free_iovec,(ErlNifIOVec *iov));
+
+ERL_NIF_API_FUNC_DECL(int,enif_ioq_peek_head,(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *head));
+
+ERL_NIF_API_FUNC_DECL(char*,enif_mutex_name,(ErlNifMutex*));
+ERL_NIF_API_FUNC_DECL(char*,enif_cond_name,(ErlNifCond*));
+ERL_NIF_API_FUNC_DECL(char*,enif_rwlock_name,(ErlNifRWLock*));
+ERL_NIF_API_FUNC_DECL(char*,enif_thread_name,(ErlNifTid));
+
+ERL_NIF_API_FUNC_DECL(int,enif_vfprintf,(FILE*, const char *fmt, va_list));
+ERL_NIF_API_FUNC_DECL(int,enif_vsnprintf,(char*, size_t, const char *fmt, va_list));
+
+ERL_NIF_API_FUNC_DECL(int,enif_make_map_from_arrays,(ErlNifEnv *env, ERL_NIF_TERM keys[], ERL_NIF_TERM values[], size_t cnt, ERL_NIF_TERM *map_out));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
@@ -332,6 +366,32 @@ ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
# define enif_thread_type ERL_NIF_API_FUNC_MACRO(enif_thread_type)
# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
+# define enif_select ERL_NIF_API_FUNC_MACRO(enif_select)
+# define enif_open_resource_type_x ERL_NIF_API_FUNC_MACRO(enif_open_resource_type_x)
+# define enif_monitor_process ERL_NIF_API_FUNC_MACRO(enif_monitor_process)
+# define enif_demonitor_process ERL_NIF_API_FUNC_MACRO(enif_demonitor_process)
+# define enif_compare_monitors ERL_NIF_API_FUNC_MACRO(enif_compare_monitors)
+# define enif_hash ERL_NIF_API_FUNC_MACRO(enif_hash)
+# define enif_whereis_pid ERL_NIF_API_FUNC_MACRO(enif_whereis_pid)
+# define enif_whereis_port ERL_NIF_API_FUNC_MACRO(enif_whereis_port)
+# define enif_ioq_create ERL_NIF_API_FUNC_MACRO(enif_ioq_create)
+# define enif_ioq_destroy ERL_NIF_API_FUNC_MACRO(enif_ioq_destroy)
+# define enif_ioq_enq ERL_NIF_API_FUNC_MACRO(enif_ioq_enq)
+# define enif_ioq_enq_binary ERL_NIF_API_FUNC_MACRO(enif_ioq_enq_binary)
+# define enif_ioq_enqv ERL_NIF_API_FUNC_MACRO(enif_ioq_enqv)
+# define enif_ioq_size ERL_NIF_API_FUNC_MACRO(enif_ioq_size)
+# define enif_ioq_deq ERL_NIF_API_FUNC_MACRO(enif_ioq_deq)
+# define enif_ioq_peek ERL_NIF_API_FUNC_MACRO(enif_ioq_peek)
+# define enif_inspect_iovec ERL_NIF_API_FUNC_MACRO(enif_inspect_iovec)
+# define enif_free_iovec ERL_NIF_API_FUNC_MACRO(enif_free_iovec)
+# define enif_ioq_peek_head ERL_NIF_API_FUNC_MACRO(enif_ioq_peek_head)
+# define enif_mutex_name ERL_NIF_API_FUNC_MACRO(enif_mutex_name)
+# define enif_cond_name ERL_NIF_API_FUNC_MACRO(enif_cond_name)
+# define enif_rwlock_name ERL_NIF_API_FUNC_MACRO(enif_rwlock_name)
+# define enif_thread_name ERL_NIF_API_FUNC_MACRO(enif_thread_name)
+# define enif_vfprintf ERL_NIF_API_FUNC_MACRO(enif_vfprintf)
+# define enif_vsnprintf ERL_NIF_API_FUNC_MACRO(enif_vsnprintf)
+# define enif_make_map_from_arrays ERL_NIF_API_FUNC_MACRO(enif_make_map_from_arrays)
/*
** ADD NEW ENTRIES HERE (before this comment)
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 0c76c6fe7d..eb23e1eaa5 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -255,19 +255,16 @@ extern ErtsPTab erts_port;
 *                                  Refs                                     *
\*                                                                           */
+#define internal_ref_no_numbers(x) ERTS_REF_NUMBERS
+#define internal_ref_numbers(x) (is_internal_ordinary_ref((x)) \
+ ? internal_ordinary_ref_numbers((x)) \
+ : (ASSERT(is_internal_magic_ref((x))), \
+ internal_magic_ref_numbers((x))))
#if defined(ARCH_64)
-#define internal_ref_no_of_numbers(x) \
- (internal_ref_data((x))[0])
-#define internal_thing_ref_no_of_numbers(thing) \
- (internal_thing_ref_data(thing)[0])
-#define internal_ref_numbers(x) \
- (&internal_ref_data((x))[1])
-#define internal_thing_ref_numbers(thing) \
- (&internal_thing_ref_data(thing)[1])
-#define external_ref_no_of_numbers(x) \
+#define external_ref_no_numbers(x) \
(external_ref_data((x))[0])
-#define external_thing_ref_no_of_numbers(thing) \
+#define external_thing_ref_no_numbers(thing) \
(external_thing_ref_data(thing)[0])
#define external_ref_numbers(x) \
(&external_ref_data((x))[1])
@@ -277,12 +274,8 @@ extern ErtsPTab erts_port;
#else
-#define internal_ref_no_of_numbers(x) (internal_ref_data_words((x)))
-#define internal_thing_ref_no_of_numbers(t) (internal_thing_ref_data_words(t))
-#define internal_ref_numbers(x) (internal_ref_data((x)))
-#define internal_thing_ref_numbers(t) (internal_thing_ref_data(t))
-#define external_ref_no_of_numbers(x) (external_ref_data_words((x)))
-#define external_thing_ref_no_of_numbers(t) (external_thing_ref_data_words((t)))
+#define external_ref_no_numbers(x) (external_ref_data_words((x)))
+#define external_thing_ref_no_numbers(t) (external_thing_ref_data_words((t)))
#define external_ref_numbers(x) (external_ref_data((x)))
#define external_thing_ref_numbers(t) (external_thing_ref_data((t)))
@@ -299,15 +292,9 @@ extern ErtsPTab erts_port;
#define internal_ref_channel_no(x) (internal_channel_no((x)))
#define external_ref_channel_no(x) (external_channel_no((x)))
-#define ref_data_words(x) (is_internal_ref((x)) \
- ? internal_ref_data_words((x)) \
- : external_ref_data_words((x)))
-#define ref_data(x) (is_internal_ref((x)) \
- ? internal_ref_data((x)) \
- : external_ref_data((x)))
-#define ref_no_of_numbers(x) (is_internal_ref((x)) \
- ? internal_ref_no_of_numbers((x))\
- : external_ref_no_of_numbers((x)))
+#define ref_no_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_no_numbers((x))\
+ : external_ref_no_numbers((x)))
#define ref_numbers(x) (is_internal_ref((x)) \
? internal_ref_numbers((x)) \
: external_ref_numbers((x)))
@@ -331,5 +318,3 @@ extern ErtsPTab erts_port;
#define is_not_ref(x) (!is_ref(x))
#endif
-
-
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 646f786651..18ed782ae3 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,17 +29,22 @@
#include "error.h"
#include "erl_thr_progress.h"
#include "dtrace-wrapper.h"
+#include "erl_binary.h"
+#include "erl_bif_unique.h"
+#include "erl_proc_sig_queue.h"
Hash erts_dist_table;
Hash erts_node_table;
-erts_smp_rwmtx_t erts_dist_table_rwmtx;
-erts_smp_rwmtx_t erts_node_table_rwmtx;
+erts_rwmtx_t erts_dist_table_rwmtx;
+erts_rwmtx_t erts_node_table_rwmtx;
DistEntry *erts_hidden_dist_entries;
DistEntry *erts_visible_dist_entries;
+DistEntry *erts_pending_dist_entries;
DistEntry *erts_not_connected_dist_entries; /* including erts_this_dist_entry */
Sint erts_no_of_hidden_dist_entries;
Sint erts_no_of_visible_dist_entries;
+Sint erts_no_of_pending_dist_entries;
Sint erts_no_of_not_connected_dist_entries; /* including erts_this_dist_entry */
DistEntry *erts_this_dist_entry;
@@ -55,8 +60,66 @@ static int references_atoms_need_init = 1;
static ErtsMonotonicTime orig_node_tab_delete_delay;
static ErtsMonotonicTime node_tab_delete_delay;
+
+static void report_gc_active_dist_entry(Eterm sysname, enum dist_entry_state);
+
+
/* -- The distribution table ---------------------------------------------- */
+#define ErtsBin2DistEntry(B) \
+ ((DistEntry *) ERTS_MAGIC_BIN_DATA((B)))
+#define ErtsDistEntry2Bin(DEP) \
+ ((Binary *) ERTS_MAGIC_BIN_FROM_DATA((DEP)))
+
+static ERTS_INLINE erts_aint_t
+de_refc_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE erts_aint_t
+de_refc_inc_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_inctest(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE void
+de_refc_inc(DistEntry *dep, erts_aint_t min)
+{
+ erts_refc_inc(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+static ERTS_INLINE void
+de_refc_dec(DistEntry *dep, erts_aint_t min)
+{
+#ifdef DEBUG
+ (void) erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min+1);
+#endif
+ erts_bin_release(ErtsDistEntry2Bin(dep));
+}
+
+static ERTS_INLINE erts_aint_t
+de_refc_dec_read(DistEntry *dep, erts_aint_t min)
+{
+ return erts_refc_dectest(&ErtsDistEntry2Bin(dep)->intern.refc, min);
+}
+
+void
+erts_ref_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep);
+ if (de_refc_inc_read(dep, 1) == 1) {
+ de_refc_inc(dep, 2); /* Pending delete */
+ }
+}
+
+void
+erts_deref_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep);
+ de_refc_dec(dep, 0);
+}
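
A note on the scheme above (my reading, hedged): the DistEntry now lives inside a magic binary, so its lifetime follows ordinary binary reference counting instead of the removed bespoke refc field; the table's own hold is the sentinel count of -1 asserted in dist_table_alloc, erts_ref_dist_entry adds an extra increment on the 1 to 2 transition to cancel a pending delete, and erts_deref_dist_entry ends in erts_bin_release so the final drop frees the entry through the magic-binary destructor.
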
+
#ifdef DEBUG
static int
is_in_de_list(DistEntry *dep, DistEntry *dep_list)
@@ -85,47 +148,59 @@ dist_table_cmp(void *dep1, void *dep2)
static void*
dist_table_alloc(void *dep_tmpl)
{
- Eterm chnl_nr;
+ erts_aint_t refc;
Eterm sysname;
+ Binary *bin;
DistEntry *dep;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
sysname = ((DistEntry *) dep_tmpl)->sysname;
- chnl_nr = make_small((Uint) atom_val(sysname));
- dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
+
+ bin = erts_create_magic_binary_x(sizeof(DistEntry),
+ erts_dist_entry_destructor,
+ ERTS_ALC_T_DIST_ENTRY,
+ 0);
+ dep = ErtsBin2DistEntry(bin);
dist_entries++;
+ refc = de_refc_dec_read(dep, -1);
+ ASSERT(refc == -1); (void)refc;
+
dep->prev = NULL;
- erts_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
+ erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->sysname = sysname;
dep->cid = NIL;
+ erts_atomic_init_nob(&dep->input_handler, (erts_aint_t) NIL);
dep->connection_id = 0;
- dep->status = 0;
+ dep->state = ERTS_DE_STATE_IDLE;
dep->flags = 0;
dep->version = 0;
- erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
- dep->node_links = NULL;
- dep->nlinks = NULL;
- dep->monitors = NULL;
+ dep->mld = NULL;
- erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
- dep->qflgs = 0;
- dep->qsize = 0;
+ erts_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_atomic32_init_nob(&dep->qflgs, 0);
+ erts_atomic_init_nob(&dep->qsize, 0);
+ erts_atomic64_init_nob(&dep->in, 0);
+ erts_atomic64_init_nob(&dep->out, 0);
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
dep->suspended = NULL;
+ dep->tmp_out_queue.first = NULL;
+ dep->tmp_out_queue.last = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
+ erts_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
erts_port_task_handle_init(&dep->dist_cmd);
dep->send = NULL;
dep->cache = NULL;
+ dep->transcode_ctx = NULL;
/* Link in */
@@ -148,10 +223,10 @@ dist_table_free(void *vdep)
{
DistEntry *dep = (DistEntry *) vdep;
+ ASSERT(de_refc_read(dep, -1) == -1);
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
ASSERT(is_nil(dep->cid));
- ASSERT(dep->nlinks == NULL);
- ASSERT(dep->node_links == NULL);
- ASSERT(dep->monitors == NULL);
+ ASSERT(dep->mld == NULL);
/* Link out */
@@ -173,14 +248,13 @@ dist_table_free(void *vdep)
erts_no_of_not_connected_dist_entries--;
ASSERT(!dep->cache);
- erts_smp_rwmtx_destroy(&dep->rwmtx);
- erts_smp_mtx_destroy(&dep->lnk_mtx);
- erts_smp_mtx_destroy(&dep->qlock);
+ erts_rwmtx_destroy(&dep->rwmtx);
+ erts_mtx_destroy(&dep->qlock);
#ifdef DEBUG
sys_memset(vdep, 0x77, sizeof(DistEntry));
#endif
- erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep);
+ erts_bin_free(ErtsDistEntry2Bin(dep));
ASSERT(dist_entries > 0);
dist_entries--;
@@ -188,29 +262,62 @@ dist_table_free(void *vdep)
void
-erts_dist_table_info(int to, void *to_arg)
+erts_dist_table_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
hash_info(to, to_arg, &erts_dist_table);
if (lock)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+
+static ERTS_INLINE DistEntry *find_dist_entry(Eterm sysname,
+ int inc_refc,
+ int connected_only)
+{
+ DistEntry *res;
+ DistEntry de;
+ de.sysname = sysname;
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
+ res = hash_get(&erts_dist_table, (void *) &de);
+ if (res) {
+ if (connected_only && is_nil(res->cid))
+ res = NULL;
+ else {
+ int pend_delete;
+ erts_aint_t refc;
+ if (inc_refc) {
+ refc = de_refc_inc_read(res, 1);
+ pend_delete = refc < 2;
+ }
+ else {
+ refc = de_refc_read(res, 0);
+ pend_delete = refc < 1;
+ }
+ if (pend_delete) /* Pending delete */
+ de_refc_inc(res, 1);
+ }
+ }
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+ return res;
}
DistEntry *
erts_channel_no_to_dist_entry(Uint cno)
{
+ /*
+ * Does NOT increase reference count!
+ */
+
/*
* For this node (and previous incarnations of this node),
* ERST_INTERNAL_CHANNEL_NO (always 0) is used as
* channel no. For other nodes, the atom index of the atom corresponding
* to the node name is used as channel no.
*/
- if(cno == ERST_INTERNAL_CHANNEL_NO) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ if (cno == ERST_INTERNAL_CHANNEL_NO)
return erts_this_dist_entry;
- }
if((cno > MAX_ATOM_INDEX)
|| (cno >= atom_table_size())
@@ -219,118 +326,244 @@ erts_channel_no_to_dist_entry(Uint cno)
/* cno is a valid atom index; find corresponding dist entry (if there
is one) */
- return erts_find_dist_entry(make_atom(cno));
+ return find_dist_entry(make_atom(cno), 0, 0);
}
-
DistEntry *
erts_sysname_to_connected_dist_entry(Eterm sysname)
{
- DistEntry de;
- DistEntry *res_dep;
- de.sysname = sysname;
-
- if(erts_this_dist_entry->sysname == sysname) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ /*
+ * Does NOT increase reference count!
+ */
+ if(erts_this_dist_entry->sysname == sysname)
return erts_this_dist_entry;
- }
-
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
- res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
- if (res_dep) {
- erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
- if (refc < 2) /* Pending delete */
- erts_refc_inc(&res_dep->refc, 1);
- }
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
- if (res_dep) {
- int deref;
- erts_smp_rwmtx_rlock(&res_dep->rwmtx);
- deref = is_nil(res_dep->cid);
- erts_smp_rwmtx_runlock(&res_dep->rwmtx);
- if (deref) {
- erts_deref_dist_entry(res_dep);
- res_dep = NULL;
- }
- }
- return res_dep;
+ return find_dist_entry(sysname, 0, 1);
}
DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
+ /*
+ * This function DOES increase reference count!
+ */
DistEntry *res;
DistEntry de;
erts_aint_t refc;
- res = erts_find_dist_entry(sysname);
+ res = find_dist_entry(sysname, 1, 0);
if (res)
return res;
de.sysname = sysname;
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
res = hash_put(&erts_dist_table, (void *) &de);
- refc = erts_refc_inctest(&res->refc, 0);
+ refc = de_refc_inc_read(res, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ de_refc_inc(res, 1);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
return res;
}
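
/*
 * A minimal usage sketch of the lookup protocol above (hypothetical
 * caller; only the erts_* calls are real). A lookup that increments
 * the reference count must be paired with a deref, which may in turn
 * trigger the delayed-delete machinery below:
 */
static void touch_dist_entry_sketch(Eterm sysname)
{
    DistEntry *dep = erts_find_or_insert_dist_entry(sysname); /* refc++ */
    /* ... dep is safe to use while the reference is held ... */
    erts_deref_dist_entry(dep);                               /* refc-- */
}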
DistEntry *erts_find_dist_entry(Eterm sysname)
{
- DistEntry *res;
- DistEntry de;
- de.sysname = sysname;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
- res = hash_get(&erts_dist_table, (void *) &de);
- if (res) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
- if (refc < 2) /* Pending delete */
- erts_refc_inc(&res->refc, 1);
+ /*
+ * Does NOT increase reference count!
+ */
+ return find_dist_entry(sysname, 0, 0);
+}
+
+DistEntry *
+erts_dhandle_to_dist_entry(Eterm dhandle, Uint32 *conn_id)
+{
+ Eterm *tpl;
+ Binary *bin;
+
+ if (!is_boxed(dhandle))
+ return NULL;
+ tpl = boxed_val(dhandle);
+ if (tpl[0] != make_arityval(2) || !is_small(tpl[1])
+ || !is_internal_magic_ref(tpl[2]))
+ return NULL;
+ *conn_id = unsigned_val(tpl[1]);
+ bin = erts_magic_ref2bin(tpl[2]);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(bin) != erts_dist_entry_destructor)
+ return NULL;
+ return ErtsBin2DistEntry(bin);
+}
+
+Eterm
+erts_build_dhandle(Eterm **hpp, ErlOffHeap* ohp,
+ DistEntry *dep, Uint32 conn_id)
+{
+ Binary *bin = ErtsDistEntry2Bin(dep);
+ Eterm mref, dhandle;
+ ASSERT(bin);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor);
+ mref = erts_mk_magic_ref(hpp, ohp, bin);
+ dhandle = TUPLE2(*hpp, make_small(conn_id), mref);
+ *hpp += 3;
+ return dhandle;
+}
+
+Eterm
+erts_make_dhandle(Process *c_p, DistEntry *dep, Uint32 conn_id)
+{
+ Eterm *hp = HAlloc(c_p, ERTS_DHANDLE_SIZE);
+ return erts_build_dhandle(&hp, &c_p->off_heap, dep, conn_id);
+}
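
/*
 * A sketch of the dhandle round trip (hypothetical helper, not part of
 * the patch): a dhandle is the 2-tuple {ConnId, MagicRef} built above,
 * and erts_dhandle_to_dist_entry() recovers both parts, returning NULL
 * for any term that is not such a tuple.
 */
static void dhandle_round_trip_sketch(Process *c_p, DistEntry *dep)
{
    Uint32 conn_id;
    Eterm dhandle = erts_make_dhandle(c_p, dep, dep->connection_id);
    DistEntry *same = erts_dhandle_to_dist_entry(dhandle, &conn_id);
    ASSERT(same == dep);
    ASSERT(conn_id == dep->connection_id);
}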
+
+static void start_timer_delete_dist_entry(void *vdep);
+static void prepare_try_delete_dist_entry(void *vdep);
+static void try_delete_dist_entry(DistEntry*);
+
+static void schedule_delete_dist_entry(DistEntry* dep)
+{
+ /*
+     * Here we need thread progress to wait for other threads that may have
+     * done a lookup without refc++ to either do refc++ or drop their refs.
+ *
+ * Note that timeouts do not guarantee thread progress.
+ */
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_schedule_thr_prgr_later_op(start_timer_delete_dist_entry,
+ dep, &dep->later_op);
+ } else {
+ /*
+         * Since OTP 20, it is possible that the destructor is executed
+         * on a dirty scheduler. Aux work cannot be done on a dirty
+         * scheduler, and scheduling any aux work on one makes that
+         * scheduler loop infinitely.
+         * To avoid this, make a spot jump: schedule this function again
+         * on the first normal scheduler, which is guaranteed to always
+         * be online. Since this is a rare event, it should not pose a
+         * noticeable utilisation hit.
+ */
+ erts_schedule_misc_aux_work(1,
+ (void (*)(void *))schedule_delete_dist_entry,
+ (void *) dep);
}
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
- return res;
}
-static void try_delete_dist_entry(void *vdep)
+static void
+start_timer_delete_dist_entry(void *vdep)
+{
+ if (node_tab_delete_delay == 0) {
+ prepare_try_delete_dist_entry(vdep);
+ }
+ else {
+ ASSERT(node_tab_delete_delay > 0);
+ erts_start_timer_callback(node_tab_delete_delay,
+ prepare_try_delete_dist_entry,
+ vdep);
+ }
+}
+
+static void
+prepare_try_delete_dist_entry(void *vdep)
+{
+ DistEntry *dep = vdep;
+ erts_aint_t refc;
+
+ /*
+ * Time has passed since we decremented refc to zero and DistEntry may
+ * have been revived. Do a fast check without table lock first.
+ */
+ refc = de_refc_read(dep, 0);
+ if (refc == 0) {
+ try_delete_dist_entry(dep);
+ }
+ else {
+ /*
+         * Someone has done a lookup and done refc++ for us.
+ */
+ refc = de_refc_dec_read(dep, 0);
+ if (refc == 0)
+ schedule_delete_dist_entry(dep);
+ }
+}
+
+static void try_delete_dist_entry(DistEntry* dep)
{
- DistEntry *dep = (DistEntry *) vdep;
erts_aint_t refc;
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_de_rwlock(dep);
+ if (dep->state != ERTS_DE_STATE_IDLE && de_refc_read(dep,0) == 0) {
+ Eterm sysname = dep->sysname;
+ enum dist_entry_state state = dep->state;
+
+ if (dep->state != ERTS_DE_STATE_PENDING)
+ ERTS_INTERNAL_ERROR("Garbage collecting connected distribution entry");
+ erts_abort_connection_rwunlock(dep);
+ report_gc_active_dist_entry(sysname, state);
+ }
+ else
+ erts_de_rwunlock(dep);
+
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
/*
* Another thread might have looked up this dist entry after
* we decided to delete it (refc became zero). If so, the other
- * thread incremented refc twice. Once for the new reference
- * and once for this thread.
+ * thread incremented refc one extra step for this thread.
*
- * If refc reach -1, no one has used the entry since we
- * set up the timer. Delete the entry.
+     * If refc reaches -1, no one has done a lookup and no one can do a
+     * lookup while we hold the table lock. Delete the entry.
*
- * If refc reach 0, the entry is currently not in use
- * but has been used since we set up the timer. Set up a
- * new timer.
+     * If refc reaches 0, someone raced us and either
+     * (1) did a lookup with its own refc++ and already released it again, or
+     * (2) did a lookup without its own refc++.
+     * Schedule a new delete operation.
*
* If refc > 0, the entry is in use. Keep the entry.
*/
- refc = erts_refc_dectest(&dep->refc, -1);
+ refc = de_refc_dec_read(dep, -1);
if (refc == -1)
(void) hash_erase(&erts_dist_table, (void *) dep);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
- if (refc == 0)
- erts_schedule_delete_dist_entry(dep);
+ if (refc == 0) {
+ schedule_delete_dist_entry(dep);
+ }
}
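
/*
 * Summary of the race resolution above (illustration only):
 *
 *   refc after dec   meaning                              action
 *   --------------   ----------------------------------   -------------------
 *        -1          no lookup since delete was started   hash_erase() entry
 *         0          raced by a lookup, since released    schedule new delete
 *       > 0          entry is in use again                keep entry
 */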
-void erts_schedule_delete_dist_entry(DistEntry *dep)
+static void report_gc_active_dist_entry(Eterm sysname,
+ enum dist_entry_state state)
{
- ASSERT(dep != erts_this_dist_entry);
- if (dep != erts_this_dist_entry) {
- if (node_tab_delete_delay == 0)
- try_delete_dist_entry((void *) dep);
- else if (node_tab_delete_delay > 0)
- erts_start_timer_callback(node_tab_delete_delay,
- try_delete_dist_entry,
- (void *) dep);
+ char *state_str;
+ erts_dsprintf_buf_t *dsbuf = erts_create_logger_dsbuf();
+ switch (state) {
+ case ERTS_DE_STATE_CONNECTED:
+ state_str = "connected";
+ break;
+ case ERTS_DE_STATE_PENDING:
+ state_str = "pending connect";
+ break;
+ case ERTS_DE_STATE_EXITING:
+ state_str = "exiting";
+ break;
+ case ERTS_DE_STATE_IDLE:
+ state_str = "idle";
+ break;
+ default:
+ state_str = "unknown";
+ break;
}
+ erts_dsprintf(dsbuf, "Garbage collecting distribution "
+ "entry for node %T in state: %s",
+ sysname, state_str);
+ erts_send_error_to_logger_nogl(dsbuf);
+}
+
+int erts_dist_entry_destructor(Binary *bin)
+{
+ DistEntry *dep = ErtsBin2DistEntry(bin);
+ erts_aint_t refc;
+
+ refc = de_refc_read(dep, -1);
+
+ if (refc == -1)
+ return 1; /* Allow deallocation of structure... */
+
+ schedule_delete_dist_entry(dep);
+
+ return 0;
}
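
/*
 * The magic-binary destructor contract assumed above: a non-zero return
 * value lets the runtime free the binary (and the DistEntry embedded in
 * it), while returning 0 keeps it alive so that the scheduled delete can
 * run and eventually free it via erts_bin_free() in dist_table_free().
 */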
Uint
@@ -345,7 +578,7 @@ erts_dist_table_size(void)
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
#ifdef DEBUG
hash_get_info(&hi, &erts_dist_table);
ASSERT(dist_entries == hi.objs);
@@ -359,12 +592,17 @@ erts_dist_table_size(void)
i++;
ASSERT(i == erts_no_of_hidden_dist_entries);
i = 0;
+ for(dep = erts_pending_dist_entries; dep; dep = dep->next)
+ i++;
+ ASSERT(i == erts_no_of_pending_dist_entries);
+ i = 0;
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next)
i++;
ASSERT(i == erts_no_of_not_connected_dist_entries);
ASSERT(dist_entries == (erts_no_of_visible_dist_entries
+ erts_no_of_hidden_dist_entries
+ + erts_no_of_pending_dist_entries
+ erts_no_of_not_connected_dist_entries));
#endif
@@ -372,50 +610,53 @@ erts_dist_table_size(void)
+ dist_entries*sizeof(DistEntry)
+ erts_dist_cache_size());
if (lock)
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
return res;
}
void
erts_set_dist_entry_not_connected(DistEntry *dep)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ DistEntry** head;
- ASSERT(dep != erts_this_dist_entry);
- ASSERT(is_internal_port(dep->cid));
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
- if(dep->flags & DFLAG_PUBLISHED) {
- if(dep->prev) {
- ASSERT(is_in_de_list(dep, erts_visible_dist_entries));
- dep->prev->next = dep->next;
- }
- else {
- ASSERT(erts_visible_dist_entries == dep);
- erts_visible_dist_entries = dep->next;
- }
+ ASSERT(dep != erts_this_dist_entry);
- ASSERT(erts_no_of_visible_dist_entries > 0);
- erts_no_of_visible_dist_entries--;
+ if (dep->state == ERTS_DE_STATE_PENDING) {
+ ASSERT(is_nil(dep->cid));
+ ASSERT(erts_no_of_pending_dist_entries > 0);
+ erts_no_of_pending_dist_entries--;
+ head = &erts_pending_dist_entries;
}
else {
- if(dep->prev) {
- ASSERT(is_in_de_list(dep, erts_hidden_dist_entries));
- dep->prev->next = dep->next;
- }
- else {
- ASSERT(erts_hidden_dist_entries == dep);
- erts_hidden_dist_entries = dep->next;
- }
-
- ASSERT(erts_no_of_hidden_dist_entries > 0);
- erts_no_of_hidden_dist_entries--;
+ ASSERT(dep->state != ERTS_DE_STATE_IDLE);
+ ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid));
+ if (dep->flags & DFLAG_PUBLISHED) {
+ ASSERT(erts_no_of_visible_dist_entries > 0);
+ erts_no_of_visible_dist_entries--;
+ head = &erts_visible_dist_entries;
+ }
+ else {
+ ASSERT(erts_no_of_hidden_dist_entries > 0);
+ erts_no_of_hidden_dist_entries--;
+ head = &erts_hidden_dist_entries;
+ }
}
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, *head));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(*head == dep);
+ *head = dep->next;
+ }
if(dep->next)
dep->next->prev = dep->prev;
- dep->status &= ~ERTS_DE_SFLG_CONNECTED;
+ dep->state = ERTS_DE_STATE_IDLE;
dep->flags = 0;
dep->prev = NULL;
dep->cid = NIL;
@@ -427,41 +668,98 @@ erts_set_dist_entry_not_connected(DistEntry *dep)
}
erts_not_connected_dist_entries = dep;
erts_no_of_not_connected_dist_entries++;
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+}
+
+void
+erts_set_dist_entry_pending(DistEntry *dep)
+{
+ ErtsMonLnkDist *mld = erts_mon_link_dist_create(dep->sysname);
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ ASSERT(dep != erts_this_dist_entry);
+ ASSERT(dep->state == ERTS_DE_STATE_IDLE);
+ ASSERT(is_nil(dep->cid));
+
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(dep == erts_not_connected_dist_entries);
+ erts_not_connected_dist_entries = dep->next;
+ }
+
+ if(dep->next)
+ dep->next->prev = dep->prev;
+
+ erts_no_of_not_connected_dist_entries--;
+
+ dep->state = ERTS_DE_STATE_PENDING;
+ dep->flags = (DFLAG_DIST_MANDATORY | DFLAG_DIST_HOPEFULLY | DFLAG_NO_MAGIC);
+ dep->connection_id = (dep->connection_id + 1) & ERTS_DIST_CON_ID_MASK;
+
+ ASSERT(!dep->mld);
+ mld->connection_id = dep->connection_id;
+ dep->mld = mld;
+
+ dep->prev = NULL;
+ dep->next = erts_pending_dist_entries;
+ if(erts_pending_dist_entries) {
+ ASSERT(erts_pending_dist_entries->prev == NULL);
+ erts_pending_dist_entries->prev = dep;
+ }
+ erts_pending_dist_entries = dep;
+ erts_no_of_pending_dist_entries++;
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
}
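
/*
 * The connection life cycle driven by erts_set_dist_entry_pending()
 * above, erts_set_dist_entry_connected() below, and
 * erts_set_dist_entry_not_connected(), as an illustrative summary
 * (ERTS_DE_STATE_EXITING is entered elsewhere, in dist.c):
 *
 *   IDLE --erts_set_dist_entry_pending()--> PENDING
 *        (bumps connection_id, installs a fresh mld)
 *   PENDING --erts_set_dist_entry_connected()--> CONNECTED
 *        (sets cid, flags and the PORT/PROC_CTRL qflag)
 *   PENDING or CONNECTED --erts_set_dist_entry_not_connected()--> IDLE
 */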
void
erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ erts_aint32_t set_qflgs;
+
+ ASSERT(dep->mld);
+
+ ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
ASSERT(dep != erts_this_dist_entry);
ASSERT(is_nil(dep->cid));
- ASSERT(is_internal_port(cid));
+ ASSERT(dep->state == ERTS_DE_STATE_PENDING);
+ ASSERT(is_internal_port(cid) || is_internal_pid(cid));
if(dep->prev) {
- ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
+ ASSERT(is_in_de_list(dep, erts_pending_dist_entries));
dep->prev->next = dep->next;
}
else {
- ASSERT(erts_not_connected_dist_entries == dep);
- erts_not_connected_dist_entries = dep->next;
+ ASSERT(erts_pending_dist_entries == dep);
+ erts_pending_dist_entries = dep->next;
}
if(dep->next)
dep->next->prev = dep->prev;
- ASSERT(erts_no_of_not_connected_dist_entries > 0);
- erts_no_of_not_connected_dist_entries--;
+ ASSERT(erts_no_of_pending_dist_entries > 0);
+ erts_no_of_pending_dist_entries--;
- dep->status |= ERTS_DE_SFLG_CONNECTED;
- dep->flags = flags;
+ dep->state = ERTS_DE_STATE_CONNECTED;
+ dep->flags = flags & ~DFLAG_NO_MAGIC;
dep->cid = cid;
- dep->connection_id++;
- dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
+ erts_atomic_set_nob(&dep->input_handler,
+ (erts_aint_t) cid);
+
dep->prev = NULL;
+ erts_atomic64_set_nob(&dep->in, 0);
+ erts_atomic64_set_nob(&dep->out, 0);
+ set_qflgs = (is_internal_port(cid) ?
+ ERTS_DE_QFLG_PORT_CTRL : ERTS_DE_QFLG_PROC_CTRL);
+ erts_atomic32_read_bor_nob(&dep->qflgs, set_qflgs);
+
if(flags & DFLAG_PUBLISHED) {
dep->next = erts_visible_dist_entries;
if(erts_visible_dist_entries) {
@@ -480,7 +778,7 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
erts_hidden_dist_entries = dep;
erts_no_of_hidden_dist_entries++;
}
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
}
/* -- Node table --------------------------------------------------------- */
@@ -531,7 +829,7 @@ node_table_free(void *venp)
{
ErlNode *enp = (ErlNode *) venp;
- ERTS_SMP_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking());
erts_deref_dist_entry(enp->dist_entry);
#ifdef DEBUG
@@ -552,26 +850,26 @@ erts_node_table_size(void)
#endif
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
#ifdef DEBUG
hash_get_info(&hi, &erts_node_table);
ASSERT(node_entries == hi.objs);
#endif
res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
if (lock)
- erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
return res;
}
void
-erts_node_table_info(int to, void *to_arg)
+erts_node_table_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
hash_info(to, to_arg, &erts_node_table);
if (lock)
- erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
}
@@ -582,18 +880,18 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
ne.sysname = sysname;
ne.creation = creation;
- erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
- erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
if (res)
return res;
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwlock(&erts_node_table_rwmtx);
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
@@ -601,7 +899,7 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
return res;
}
@@ -610,7 +908,7 @@ static void try_delete_node(void *venp)
ErlNode *enp = (ErlNode *) venp;
erts_aint_t refc;
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwlock(&erts_node_table_rwmtx);
/*
* Another thread might have looked up this node after we
* decided to delete it (refc became zero). If so, the other
@@ -629,7 +927,7 @@ static void try_delete_node(void *venp)
refc = erts_refc_dectest(&enp->refc, -1);
if (refc == -1)
(void) hash_erase(&erts_node_table, (void *) enp);
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
if (refc == 0)
erts_schedule_delete_node(enp);
@@ -649,7 +947,7 @@ void erts_schedule_delete_node(ErlNode *enp)
}
struct pn_data {
- int to;
+ fmtfn_t to;
void *to_arg;
Eterm sysname;
int no_sysname;
@@ -679,7 +977,7 @@ static void print_node(void *venp, void *vpndp)
pndp->no_total++;
}
-void erts_print_node_info(int to,
+void erts_print_node_info(fmtfn_t to,
void *to_arg,
Eterm sysname,
int *no_sysname,
@@ -695,13 +993,13 @@ void erts_print_node_info(int to,
pnd.no_total = 0;
if (lock)
- erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+ erts_rwmtx_rlock(&erts_node_table_rwmtx);
hash_foreach(&erts_node_table, print_node, (void *) &pnd);
if (pnd.no_sysname != 0) {
erts_print(to, to_arg, "\n");
}
if (lock)
- erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+ erts_rwmtx_runlock(&erts_node_table_rwmtx);
if(no_sysname)
*no_sysname = pnd.no_sysname;
@@ -714,20 +1012,19 @@ void erts_print_node_info(int to,
void
erts_set_this_node(Eterm sysname, Uint creation)
{
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
- ASSERT(erts_refc_read(&erts_this_dist_entry->refc, 2));
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
+ ASSERT(2 <= de_refc_read(erts_this_dist_entry, 2));
if (erts_refc_dectest(&erts_this_node->refc, 0) == 0)
try_delete_node(erts_this_node);
- if (erts_refc_dectest(&erts_this_dist_entry->refc, 0) == 0)
- try_delete_dist_entry(erts_this_dist_entry);
+ erts_deref_dist_entry(erts_this_dist_entry);
erts_this_node = NULL; /* to make sure refc is bumped for this node */
erts_this_node = erts_find_or_insert_node(sysname, creation);
erts_this_dist_entry = erts_this_node->dist_entry;
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ erts_ref_dist_entry(erts_this_dist_entry);
erts_this_node_sysname = erts_this_node_sysname_BUFFER;
erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
@@ -746,7 +1043,7 @@ erts_delayed_node_table_gc(void)
void erts_init_node_tables(int dd_sec)
{
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
HashFunctions f;
ErlNode node_tmpl;
@@ -757,11 +1054,13 @@ void erts_init_node_tables(int dd_sec)
orig_node_tab_delete_delay = node_tab_delete_delay;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
- erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
+ erts_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
@@ -780,9 +1079,11 @@ void erts_init_node_tables(int dd_sec)
erts_hidden_dist_entries = NULL;
erts_visible_dist_entries = NULL;
+ erts_pending_dist_entries = NULL;
erts_not_connected_dist_entries = NULL;
erts_no_of_hidden_dist_entries = 0;
erts_no_of_visible_dist_entries = 0;
+ erts_no_of_pending_dist_entries = 0;
erts_no_of_not_connected_dist_entries = 0;
node_tmpl.sysname = am_Noname;
@@ -794,9 +1095,9 @@ void erts_init_node_tables(int dd_sec)
ASSERT(erts_this_node->dist_entry != NULL);
erts_this_dist_entry = erts_this_node->dist_entry;
/* +1 for erts_this_dist_entry */
- /* +1 for erts_this_node->dist_entry */
- erts_refc_init(&erts_this_dist_entry->refc, 2);
+ erts_ref_dist_entry(erts_this_dist_entry);
+ ASSERT(2 == de_refc_read(erts_this_dist_entry, 2));
erts_this_node_sysname = erts_this_node_sysname_BUFFER;
erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
@@ -805,17 +1106,44 @@ void erts_init_node_tables(int dd_sec)
references_atoms_need_init = 1;
}
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int erts_lc_is_de_rwlocked(DistEntry *dep)
{
- return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx);
+ return erts_lc_rwmtx_is_rwlocked(&dep->rwmtx);
}
int erts_lc_is_de_rlocked(DistEntry *dep)
{
- return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
+ return erts_lc_rwmtx_is_rlocked(&dep->rwmtx);
}
#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
+ DistEntry *dep = (DistEntry*)dep_raw;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&dep->rwmtx.lcnt, "dist_entry", dep->sysname,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->qlock.lcnt, "dist_entry_out_queue", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ if (dep->mld)
+ erts_lcnt_install_new_lock_info(&dep->mld->mtx.lcnt, "dist_entry_links", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ } else {
+ erts_lcnt_uninstall(&dep->rwmtx.lcnt);
+ erts_lcnt_uninstall(&dep->qlock.lcnt);
+ if (dep->mld)
+ erts_lcnt_uninstall(&dep->mld->mtx.lcnt);
+ }
+}
+
+void erts_lcnt_update_distribution_locks(int enable) {
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
+ (void*)(UWord)enable);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
@@ -848,16 +1176,19 @@ static Eterm AM_node_references;
static Eterm AM_system;
static Eterm AM_timer;
static Eterm AM_delayed_delete_timer;
+static Eterm AM_thread_progress_delete_timer;
+static Eterm AM_signal;
static void setup_reference_table(void);
-static Eterm reference_table_term(Uint **hpp, Uint *szp);
+static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
static void delete_reference_table(void);
-#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
-#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
-#else
-#define ID_HEAP_SIZE 3 /* 2-tuple */
-#endif
+#undef ERTS_MAX__
+#define ERTS_MAX__(A, B) ((A) > (B) ? (A) : (B))
+
+#define ID_HEAP_SIZE \
+ ERTS_MAX__(ERTS_MAGIC_REF_THING_SIZE, \
+ ERTS_MAX__(BIG_UINT_HEAP_SIZE, 3))
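
/*
 * Illustrative expansion: ID_HEAP_SIZE is the largest of the magic-ref
 * thing size, a bignum holding a Uint, and a 2-tuple (3 words), so any
 * referrer id copied below fits in the fixed id_heap array.
 */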
typedef struct node_referrer_ {
struct node_referrer_ *next;
@@ -868,8 +1199,10 @@ typedef struct node_referrer_ {
int bin_ref;
int timer_ref;
int system_ref;
+ int signal_ref;
Eterm id;
Uint id_heap[ID_HEAP_SIZE];
+ ErlOffHeap off_heap;
} NodeReferrer;
typedef struct {
@@ -880,9 +1213,11 @@ typedef struct {
typedef struct dist_referrer_ {
struct dist_referrer_ *next;
int heap_ref;
+ int ets_ref;
int node_ref;
int ctrl_ref;
int system_ref;
+ int signal_ref;
Eterm id;
Uint creation;
Uint id_heap[ID_HEAP_SIZE];
@@ -914,8 +1249,8 @@ erts_get_node_and_dist_references(struct process *proc)
Uint *endp;
#endif
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* No need to lock any thing since we are alone... */
if (references_atoms_need_init) {
@@ -935,6 +1270,8 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(timer);
INIT_AM(system);
INIT_AM(delayed_delete_timer);
+ INIT_AM(thread_progress_delete_timer);
+ INIT_AM(signal);
references_atoms_need_init = 0;
}
@@ -942,7 +1279,7 @@ erts_get_node_and_dist_references(struct process *proc)
/* Get term size */
size = 0;
- (void) reference_table_term(NULL, &size);
+ (void) reference_table_term(NULL, NULL, &size);
hp = HAlloc(proc, size);
#ifdef DEBUG
@@ -951,14 +1288,14 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
/* Write term */
- res = reference_table_term(&hp, NULL);
+ res = reference_table_term(&hp, &proc->off_heap, NULL);
ASSERT(endp == hp);
delete_reference_table();
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
return res;
}
@@ -971,6 +1308,7 @@ erts_get_node_and_dist_references(struct process *proc)
#define MONITOR_REF 7
#define TIMER_REF 8
#define SYSTEM_REF 9
+#define SIGNAL_REF 10
#define INC_TAB_SZ 10
@@ -997,20 +1335,24 @@ insert_dist_referrer(ReferredDist *referred_dist,
else {
Uint *hp = &drp->id_heap[0];
ASSERT(is_tuple(id));
- drp->id = copy_struct(id, size_object(id), &hp, NULL);
+ drp->id = copy_struct(id, size_object(id), &hp, NULL);
}
drp->creation = creation;
drp->heap_ref = 0;
+ drp->ets_ref = 0;
drp->node_ref = 0;
drp->ctrl_ref = 0;
drp->system_ref = 0;
+ drp->signal_ref = 0;
}
switch (type) {
case NODE_REF: drp->node_ref++; break;
case CTRL_REF: drp->ctrl_ref++; break;
case HEAP_REF: drp->heap_ref++; break;
+ case ETS_REF: drp->ets_ref++; break;
case SYSTEM_REF: drp->system_ref++; break;
+ case SIGNAL_REF: drp->signal_ref++; break;
default: ASSERT(0);
}
}
@@ -1048,13 +1390,14 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
sizeof(NodeReferrer));
nrp->next = referred_node->referrers;
+ ERTS_INIT_OFF_HEAP(&nrp->off_heap);
referred_node->referrers = nrp;
if(IS_CONST(id))
nrp->id = id;
else {
Uint *hp = &nrp->id_heap[0];
- ASSERT(is_big(id) || is_tuple(id));
- nrp->id = copy_struct(id, size_object(id), &hp, NULL);
+ ASSERT(is_big(id) || is_tuple(id) || is_internal_magic_ref(id));
+ nrp->id = copy_struct(id, size_object(id), &hp, &nrp->off_heap);
}
nrp->heap_ref = 0;
nrp->link_ref = 0;
@@ -1063,6 +1406,7 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
nrp->bin_ref = 0;
nrp->timer_ref = 0;
nrp->system_ref = 0;
+ nrp->signal_ref = 0;
}
switch (type) {
@@ -1073,6 +1417,7 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
case MONITOR_REF: nrp->monitor_ref++; break;
case TIMER_REF: nrp->timer_ref++; break;
case SYSTEM_REF: nrp->system_ref++; break;
+ case SIGNAL_REF: nrp->signal_ref++; break;
default: ASSERT(0);
}
}
@@ -1117,6 +1462,15 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
insert_offheap(oh, a->type, a->id);
}
+#define ErtsIsDistEntryBinary(Bin) \
+ (((Bin)->intern.flags & BIN_FLAG_MAGIC) \
+ && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dist_entry_destructor)
+
+#define IsSendCtxBinary(Bin) \
+ (((Bin)->intern.flags & BIN_FLAG_MAGIC) \
+ && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dsend_context_dtor)
+
+
static void
insert_offheap(ErlOffHeap *oh, int type, Eterm id)
{
@@ -1126,12 +1480,15 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
- case REFC_BINARY_SUBTAG:
- if(IsMatchProgBinary(u.pb->val)) {
+ case REF_SUBTAG:
+ if (ErtsIsDistEntryBinary(u.mref->mb))
+ insert_dist_entry(ErtsBin2DistEntry(u.mref->mb),
+ type, id, 0);
+ else if(IsMatchProgBinary(u.mref->mb)) {
InsertedBin *ib;
int insert_bin = 1;
for (ib = inserted_bins; ib; ib = ib->next)
- if(ib->bin_val == u.pb->val) {
+ if(ib->bin_val == (Binary *) u.mref->mb) {
insert_bin = 0;
break;
}
@@ -1140,18 +1497,24 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
Uint *hp = &id_heap[0];
InsertedBin *nib;
UseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
- a.id = erts_bld_uint(&hp, NULL, (Uint) u.pb->val);
- erts_match_prog_foreach_offheap(u.pb->val,
+ a.id = erts_bld_uint(&hp, NULL, (Uint) u.mref->mb);
+ erts_match_prog_foreach_offheap((Binary *) u.mref->mb,
insert_offheap2,
(void *) &a);
nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
- nib->bin_val = u.pb->val;
+ nib->bin_val = (Binary *) u.mref->mb;
nib->next = inserted_bins;
inserted_bins = nib;
UnUseTmpHeapNoproc(BIG_UINT_HEAP_SIZE);
}
- }
+ }
+ else if (IsSendCtxBinary(u.mref->mb)) {
+ ErtsSendContext* ctx = ERTS_MAGIC_BIN_DATA(u.mref->mb);
+ if (ctx->deref_dep)
+ insert_dist_entry(ctx->dep, type, id, 0);
+ }
break;
+ case REFC_BINARY_SUBTAG:
case FUN_SUBTAG:
break; /* No need to */
default:
@@ -1162,58 +1525,164 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
}
}
-static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
+static void insert_monitor_data(ErtsMonitor *mon, int type, Eterm id)
+{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ if ((mdp->origin.flags & (ERTS_ML_FLG_DBG_VISITED
+ | ERTS_ML_FLG_EXTENDED)) == ERTS_ML_FLG_EXTENDED) {
+ if (mon->type != ERTS_MON_TYPE_NODE) {
+ ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
+ ASSERT(mon->flags & ERTS_ML_FLG_EXTENDED);
+ if (mdep->uptr.ohhp) {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = mdep->uptr.ohhp;
+ insert_offheap(&oh, type, id);
+ }
+ }
+ }
+ mdp->origin.flags |= ERTS_ML_FLG_DBG_VISITED;
+}
+
+static void insert_monitor(ErtsMonitor *mon, void *idp)
+{
+ Eterm id = *((Eterm *) idp);
+ insert_monitor_data(mon, MONITOR_REF, id);
+}
+
+static void clear_visited_monitor(ErtsMonitor *mon, void *p)
+{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ mdp->origin.flags &= ~ERTS_ML_FLG_DBG_VISITED;
+}
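
/*
 * The ERTS_ML_FLG_DBG_VISITED flag implements a mark/clear pass (sketch
 * of the assumed protocol): setup_reference_table() marks each monitor
 * and link as it is counted, so shared ErtsMonitorData is inserted only
 * once, and delete_reference_table() clears every mark again so the
 * next invocation starts from a clean state.
 */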
+
+static void
+insert_p_monitors(ErtsPTabElementCommon *p)
+{
+ Eterm id = p->id;
+ erts_monitor_tree_foreach(p->u.alive.monitors,
+ insert_monitor,
+ (void *) &id);
+ erts_monitor_list_foreach(p->u.alive.lt_monitors,
+ insert_monitor,
+ (void *) &id);
+}
+
+static void
+insert_dist_monitors(DistEntry *dep)
+{
+ if (dep->mld) {
+ erts_monitor_list_foreach(dep->mld->monitors,
+ insert_monitor,
+ (void *) &dep->sysname);
+ erts_monitor_tree_foreach(dep->mld->orig_name_monitors,
+ insert_monitor,
+ (void *) &dep->sysname);
+ }
+}
+
+static void
+clear_visited_p_monitors(ErtsPTabElementCommon *p)
+{
+ erts_monitor_tree_foreach(p->u.alive.monitors,
+ clear_visited_monitor,
+ NULL);
+ erts_monitor_list_foreach(p->u.alive.lt_monitors,
+ clear_visited_monitor,
+ NULL);
+}
+
+static void
+clear_visited_dist_monitors(DistEntry *dep)
+{
+ if (dep->mld) {
+ erts_monitor_list_foreach(dep->mld->monitors,
+ clear_visited_monitor,
+ NULL);
+ erts_monitor_tree_foreach(dep->mld->orig_name_monitors,
+ clear_visited_monitor,
+ NULL);
+ }
+}
+
+static void insert_link_data(ErtsLink *lnk, int type, Eterm id)
{
- Eterm *idp = p;
- if(is_external(monitor->pid))
- insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
- if(is_external(monitor->ref))
- insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+ if ((ldp->a.flags & (ERTS_ML_FLG_DBG_VISITED
+ | ERTS_ML_FLG_EXTENDED)) == ERTS_ML_FLG_EXTENDED) {
+ ErtsLinkDataExtended *ldep = (ErtsLinkDataExtended *) ldp;
+ if (ldep->ohhp) {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ oh.first = ldep->ohhp;
+ insert_offheap(&oh, type, id);
+ }
+ }
+ ldp->a.flags |= ERTS_ML_FLG_DBG_VISITED;
}
-static void doit_insert_link(ErtsLink *lnk, void *p)
+static void insert_link(ErtsLink *lnk, void *idp)
{
- Eterm *idp = p;
- if(is_external(lnk->pid))
- insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
- *idp);
+ Eterm id = *((Eterm *) idp);
+ insert_link_data(lnk, LINK_REF, id);
}
+static void clear_visited_link(ErtsLink *lnk, void *p)
+{
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+ ldp->a.flags &= ~ERTS_ML_FLG_DBG_VISITED;
+}
static void
-insert_monitors(ErtsMonitor *monitors, Eterm id)
+insert_p_links(ErtsPTabElementCommon *p)
{
- erts_doforall_monitors(monitors,&doit_insert_monitor,&id);
+ Eterm id = p->id;
+ erts_link_tree_foreach(p->u.alive.links, insert_link, (void *) &id);
}
static void
-insert_links(ErtsLink *lnk, Eterm id)
+insert_dist_links(DistEntry *dep)
{
- erts_doforall_links(lnk,&doit_insert_link,&id);
+ if (dep->mld)
+ erts_link_list_foreach(dep->mld->links,
+ insert_link,
+ (void *) &dep->sysname);
}
-static void doit_insert_link2(ErtsLink *lnk, void *p)
+static void
+clear_visited_p_links(ErtsPTabElementCommon *p)
{
- Eterm *idp = p;
- if(is_external(lnk->pid))
- insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
- *idp);
- insert_links(ERTS_LINK_ROOT(lnk), *idp);
+ erts_link_tree_foreach(p->u.alive.links,
+ clear_visited_link,
+ NULL);
}
static void
-insert_links2(ErtsLink *lnk, Eterm id)
+clear_visited_dist_links(DistEntry *dep)
{
- erts_doforall_links(lnk,&doit_insert_link2,&id);
+ if (dep->mld)
+ erts_link_list_foreach(dep->mld->links,
+ clear_visited_link,
+ NULL);
}
static void
insert_ets_table(DbTable *tab, void *unused)
{
+ ErlOffHeap off_heap;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
struct insert_offheap2_arg a;
a.type = ETS_REF;
- a.id = tab->common.id;
+ if (tab->common.status & DB_NAMED_TABLE)
+ a.id = tab->common.the_name;
+ else {
+ Eterm *hp = &heap[0];
+ ERTS_INIT_OFF_HEAP(&off_heap);
+ a.id = erts_mk_magic_ref(&hp, &off_heap, tab->common.btid);
+ }
erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
+ if (is_not_atom(a.id))
+ erts_cleanup_offheap(&off_heap);
}
static void
@@ -1248,25 +1717,32 @@ init_referred_dist(void *dist, void *unused)
no_referred_dists++;
}
-#ifdef ERTS_SMP
static void
insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
{
insert_offheap(&bp->off_heap, HEAP_REF, to);
}
-#endif
static void
insert_delayed_delete_node(void *state,
ErtsMonotonicTime timeout_pos,
void *vnp)
{
- DeclareTmpHeapNoproc(heap,3);
- UseTmpHeapNoproc(3);
+ Eterm heap[3];
insert_node((ErlNode *) vnp,
SYSTEM_REF,
TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer));
- UnUseTmpHeapNoproc(3);
+}
+
+static void
+insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vdep)
+{
+ DistEntry *dep = vdep;
+ Eterm heap[3];
+ insert_dist_entry(dep,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_thread_progress_delete_timer),
+ 0);
}
static void
@@ -1274,13 +1750,66 @@ insert_delayed_delete_dist_entry(void *state,
ErtsMonotonicTime timeout_pos,
void *vdep)
{
- DeclareTmpHeapNoproc(heap,3);
- UseTmpHeapNoproc(3);
- insert_dist_entry((DistEntry *) vdep,
+ DistEntry *dep = vdep;
+ Eterm heap[3];
+ insert_dist_entry(dep,
SYSTEM_REF,
TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer),
0);
- UnUseTmpHeapNoproc(3);
+}
+
+static void
+insert_message(ErtsMessage *msg, int type, Process *proc)
+{
+ ErlHeapFragment *heap_frag = NULL;
+
+ ASSERT(ERTS_SIG_IS_MSG(msg));
+ if (msg->data.attached) {
+ if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ heap_frag = &msg->hfrag;
+ else if (ERTS_SIG_IS_INTERNAL_MSG(msg))
+ heap_frag = msg->data.heap_frag;
+ else {
+ if (msg->data.dist_ext->dep)
+ insert_dist_entry(msg->data.dist_ext->dep,
+ type, proc->common.id, 0);
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ }
+ }
+ while (heap_frag) {
+ insert_offheap(&(heap_frag->off_heap),
+ type,
+ proc->common.id);
+ heap_frag = heap_frag->next;
+ }
+}
+
+static void
+insert_sig_msg(ErtsMessage *msg, void *arg)
+{
+ insert_message(msg, SIGNAL_REF, (Process *) arg);
+}
+
+static void
+insert_sig_offheap(ErlOffHeap *ohp, void *arg)
+{
+ Process *proc = arg;
+ insert_offheap(ohp, SIGNAL_REF, proc->common.id);
+}
+
+static void
+insert_sig_monitor(ErtsMonitor *mon, void *arg)
+{
+ Process *proc = arg;
+ insert_monitor_data(mon, SIGNAL_REF, proc->common.id);
+}
+
+static void
+insert_sig_link(ErtsLink *lnk, void *arg)
+{
+ Process *proc = arg;
+ insert_link_data(lnk, SIGNAL_REF, proc->common.id);
}
static void
@@ -1314,9 +1843,12 @@ setup_reference_table(void)
erts_debug_callback_timer_foreach(try_delete_node,
insert_delayed_delete_node,
NULL);
- erts_debug_callback_timer_foreach(try_delete_dist_entry,
+ erts_debug_callback_timer_foreach(prepare_try_delete_dist_entry,
insert_delayed_delete_dist_entry,
NULL);
+ erts_debug_later_op_foreach(start_timer_delete_dist_entry,
+ insert_thr_prgr_delete_dist_entry,
+ NULL);
UseTmpHeapNoproc(3);
insert_node(erts_this_node,
@@ -1335,12 +1867,7 @@ setup_reference_table(void)
Process *proc = erts_pix2proc(i);
if (proc) {
int mli;
- ErtsMessage *msg_list[] = {
- proc->msg.first,
-#ifdef ERTS_SMP
- proc->msg_inq.first,
-#endif
- proc->msg_frag};
+ ErtsMessage *msg_list[] = {proc->msg_frag};
/* Insert Heap */
insert_offheap(&(proc->off_heap),
@@ -1355,40 +1882,36 @@ setup_reference_table(void)
/* Insert msg buffers */
for (mli = 0; mli < sizeof(msg_list)/sizeof(msg_list[0]); mli++) {
ErtsMessage *msg;
- for (msg = msg_list[mli]; msg; msg = msg->next) {
- ErlHeapFragment *heap_frag = NULL;
- if (msg->data.attached) {
- if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
- heap_frag = &msg->hfrag;
- else if (is_value(ERL_MESSAGE_TERM(msg)))
- heap_frag = msg->data.heap_frag;
- else {
- if (msg->data.dist_ext->dep)
- insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, proc->common.id, 0);
- if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
- heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
- }
- }
- while (heap_frag) {
- insert_offheap(&(heap_frag->off_heap),
- HEAP_REF,
- proc->common.id);
- heap_frag = heap_frag->next;
- }
- }
+ for (msg = msg_list[mli]; msg; msg = msg->next)
+ insert_message(msg, HEAP_REF, proc);
}
+
+ /* Insert signal queue */
+ erts_proc_sig_debug_foreach_sig(proc,
+ insert_sig_msg,
+ insert_sig_offheap,
+ insert_sig_monitor,
+ insert_sig_link,
+ (void *) proc);
+
/* Insert links */
- if (ERTS_P_LINKS(proc))
- insert_links(ERTS_P_LINKS(proc), proc->common.id);
- if (ERTS_P_MONITORS(proc))
- insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
+ insert_p_links(&proc->common);
+
+ /* Insert monitors */
+ insert_p_monitors(&proc->common);
+
+ {
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
+ if (dep)
+ insert_dist_entry(dep,
+ CTRL_REF,
+ proc->common.id,
+ 0);
+ }
}
}
-#ifdef ERTS_SMP
erts_foreach_sys_msg_in_q(insert_sys_msg);
-#endif
/* Insert all ports */
max = erts_ptab_max(&erts_port);
@@ -1406,18 +1929,17 @@ setup_reference_table(void)
continue;
/* Insert links */
- if (ERTS_P_LINKS(prt))
- insert_links(ERTS_P_LINKS(prt), prt->common.id);
+ insert_p_links(&prt->common);
/* Insert monitors */
- if (ERTS_P_MONITORS(prt))
- insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
+ insert_p_monitors(&prt->common);
/* Insert port data */
ohp = erts_port_data_offheap(prt);
if (ohp)
insert_offheap(ohp, HEAP_REF, prt->common.id);
/* Insert controller */
- if (prt->dist_entry)
- insert_dist_entry(prt->dist_entry,
+ dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ if (dep)
+ insert_dist_entry(dep,
CTRL_REF,
prt->common.id,
0);
@@ -1459,32 +1981,25 @@ setup_reference_table(void)
/* Insert all dist links */
for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
- if(dep->nlinks)
- insert_links2(dep->nlinks, dep->sysname);
- if(dep->node_links)
- insert_links(dep->node_links, dep->sysname);
- if(dep->monitors)
- insert_monitors(dep->monitors, dep->sysname);
+ insert_dist_links(dep);
+ insert_dist_monitors(dep);
}
for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
- if(dep->nlinks)
- insert_links2(dep->nlinks, dep->sysname);
- if(dep->node_links)
- insert_links(dep->node_links, dep->sysname);
- if(dep->monitors)
- insert_monitors(dep->monitors, dep->sysname);
+ insert_dist_links(dep);
+ insert_dist_monitors(dep);
+ }
+
+ for(dep = erts_pending_dist_entries; dep; dep = dep->next) {
+ insert_dist_links(dep);
+ insert_dist_monitors(dep);
}
/* Not connected dist entries should not have any links,
but inspect them anyway */
for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
- if(dep->nlinks)
- insert_links2(dep->nlinks, dep->sysname);
- if(dep->node_links)
- insert_links(dep->node_links, dep->sysname);
- if(dep->monitors)
- insert_monitors(dep->monitors, dep->sysname);
+ insert_dist_links(dep);
+ insert_dist_monitors(dep);
}
/* Insert all ets tables */
@@ -1517,7 +2032,7 @@ setup_reference_table(void)
*/
static Eterm
-reference_table_term(Uint **hpp, Uint *szp)
+reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
{
#undef MK_2TUP
#undef MK_3TUP
@@ -1569,15 +2084,18 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref));
nrl = MK_CONS(tup, nrl);
}
+ if(nrp->signal_ref) {
+ tup = MK_2TUP(AM_signal, MK_UINT(nrp->signal_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
nrid = nrp->id;
if (!IS_CONST(nrp->id)) {
-
Uint nrid_sz = size_object(nrp->id);
if (szp)
*szp += nrid_sz;
if (hpp)
- nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
+ nrid = copy_struct(nrp->id, nrid_sz, hpp, ohp);
}
if (is_internal_pid(nrid) || nrid == am_error_logger) {
@@ -1593,24 +2111,25 @@ reference_table_term(Uint **hpp, Uint *szp)
}
else if(is_internal_port(nrid)) {
ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
- && !nrp->timer_ref && !nrp->system_ref);
+ && !nrp->timer_ref && !nrp->system_ref && !nrp->signal_ref);
tup = MK_2TUP(AM_port, nrid);
}
else if(nrp->ets_ref) {
ASSERT(!nrp->heap_ref && !nrp->link_ref &&
!nrp->monitor_ref && !nrp->bin_ref
- && !nrp->timer_ref && !nrp->system_ref);
+ && !nrp->timer_ref && !nrp->system_ref && !nrp->signal_ref);
tup = MK_2TUP(AM_ets, nrid);
}
else if(nrp->bin_ref) {
ASSERT(is_small(nrid) || is_big(nrid));
ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref &&
!nrp->monitor_ref && !nrp->timer_ref
- && !nrp->system_ref);
+ && !nrp->system_ref && !nrp->signal_ref);
tup = MK_2TUP(AM_match_spec, nrid);
}
else {
- ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref);
+ ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
+ && !nrp->signal_ref);
ASSERT(is_atom(nrid));
tup = MK_2TUP(AM_dist, nrid);
}
@@ -1646,29 +2165,44 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref));
drl = MK_CONS(tup, drl);
}
+ if(drp->ets_ref) {
+ tup = MK_2TUP(AM_ets, MK_UINT(drp->ets_ref));
+ drl = MK_CONS(tup, drl);
+ }
if(drp->system_ref) {
tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref));
drl = MK_CONS(tup, drl);
}
+ if(drp->signal_ref) {
+ tup = MK_2TUP(AM_signal, MK_UINT(drp->signal_ref));
+ drl = MK_CONS(tup, drl);
+ }
if (is_internal_pid(drp->id)) {
ASSERT(!drp->node_ref);
tup = MK_2TUP(AM_process, drp->id);
}
else if(is_internal_port(drp->id)) {
- ASSERT(drp->ctrl_ref && !drp->node_ref);
+ ASSERT(drp->ctrl_ref && !drp->node_ref && !drp->signal_ref);
tup = MK_2TUP(AM_port, drp->id);
}
else if (is_tuple(drp->id)) {
Eterm *t;
ASSERT(drp->system_ref && !drp->node_ref
- && !drp->ctrl_ref && !drp->heap_ref);
+ && !drp->ctrl_ref && !drp->heap_ref && !drp->ets_ref
+ && !drp->signal_ref);
t = tuple_val(drp->id);
ASSERT(2 == arityval(t[0]));
tup = MK_2TUP(t[1], t[2]);
}
+ else if (drp->ets_ref) {
+ ASSERT(!drp->heap_ref && !drp->node_ref &&
+ !drp->ctrl_ref && !drp->system_ref
+ && !drp->signal_ref);
+ tup = MK_2TUP(AM_ets, drp->id);
+ }
else {
- ASSERT(!drp->ctrl_ref && drp->node_ref);
+ ASSERT(!drp->ctrl_ref && drp->node_ref && !drp->signal_ref);
ASSERT(is_atom(drp->id));
tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
tup = MK_2TUP(AM_node, tup);
@@ -1684,7 +2218,7 @@ reference_table_term(Uint **hpp, Uint *szp)
/* DistList = [{Dist, Refc, ReferenceIdList}] */
tup = MK_3TUP(referred_dists[i].dist->sysname,
- MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 0)),
+ MK_UINT(de_refc_read(referred_dists[i].dist, 0)),
dril);
dl = MK_CONS(tup, dl);
}
@@ -1703,15 +2237,27 @@ reference_table_term(Uint **hpp, Uint *szp)
}
+static void noop_sig_msg(ErtsMessage *msg, void *arg)
+{
+
+}
+
+static void noop_sig_offheap(ErlOffHeap *oh, void *arg)
+{
+
+}
+
static void
delete_reference_table(void)
{
- Uint i;
+ DistEntry *dep;
+ int i, max;
for(i = 0; i < no_referred_nodes; i++) {
NodeReferrer *nrp;
NodeReferrer *tnrp;
nrp = referred_nodes[i].referrers;
while(nrp) {
+ erts_cleanup_offheap(&nrp->off_heap);
tnrp = nrp;
nrp = nrp->next;
erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
@@ -1737,17 +2283,71 @@ delete_reference_table(void)
inserted_bins = inserted_bins->next;
erts_free(ERTS_ALC_T_NC_TMP, (void *)ib);
}
+
+ /* Cleanup... */
+
+ max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
+ Process *proc = erts_pix2proc(i);
+ if (proc) {
+ clear_visited_p_links(&proc->common);
+ clear_visited_p_monitors(&proc->common);
+ erts_proc_sig_debug_foreach_sig(proc,
+ noop_sig_msg,
+ noop_sig_offheap,
+ clear_visited_monitor,
+ clear_visited_link,
+ (void *) proc);
+ }
+ }
+
+ max = erts_ptab_max(&erts_port);
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *prt;
+
+ prt = erts_pix2port(i);
+ if (!prt)
+ continue;
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_DEAD)
+ continue;
+
+ clear_visited_p_links(&prt->common);
+ clear_visited_p_monitors(&prt->common);
+ }
+
+ for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
+ clear_visited_dist_links(dep);
+ clear_visited_dist_monitors(dep);
+ }
+
+ for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
+ clear_visited_dist_links(dep);
+ clear_visited_dist_monitors(dep);
+ }
+
+ for(dep = erts_pending_dist_entries; dep; dep = dep->next) {
+ clear_visited_dist_links(dep);
+ clear_visited_dist_monitors(dep);
+ }
+
+ for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
+ clear_visited_dist_links(dep);
+ clear_visited_dist_monitors(dep);
+ }
}
void
erts_debug_test_node_tab_delayed_delete(Sint64 millisecs)
{
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
if (millisecs < 0)
node_tab_delete_delay = orig_node_tab_delete_delay;
else
node_tab_delete_delay = millisecs;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 7a4434acbf..c44f1f8991 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,7 +18,17 @@
* %CopyrightEnd%
*/
-#ifndef ERL_NODE_TABLES_H__
+#ifndef ERL_NODE_TABLES_BASIC__
+#define ERL_NODE_TABLES_BASIC__
+
+typedef struct dist_entry_ DistEntry;
+typedef struct ErtsDistOutputBuf_ ErtsDistOutputBuf;
+void erts_ref_dist_entry(DistEntry *dep);
+void erts_deref_dist_entry(DistEntry *dep);
+
+#endif
+
+#if !defined(ERL_NODE_TABLES_BASIC_ONLY) && !defined(ERL_NODE_TABLES_H__)
#define ERL_NODE_TABLES_H__
/*
@@ -42,12 +52,14 @@
#include "sys.h"
#include "hash.h"
#include "erl_alloc.h"
-#include "erl_process.h"
-#include "erl_monitors.h"
-#include "erl_smp.h"
#define ERTS_PORT_TASK_ONLY_BASIC_TYPES__
#include "erl_port_task.h"
#undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+#include "erl_process.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
+#include "erl_monitor_link.h"
#define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60)
#define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000)
@@ -55,17 +67,24 @@
#define ERST_INTERNAL_CHANNEL_NO 0
-#define ERTS_DE_SFLG_CONNECTED (((Uint32) 1) << 0)
-#define ERTS_DE_SFLG_EXITING (((Uint32) 1) << 1)
-
-#define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \
- | ERTS_DE_SFLG_EXITING)
+enum dist_entry_state {
+ ERTS_DE_STATE_IDLE,
+ ERTS_DE_STATE_PENDING,
+ ERTS_DE_STATE_CONNECTED,
+ ERTS_DE_STATE_EXITING
+};
-#define ERTS_DE_QFLG_BUSY (((Uint32) 1) << 0)
-#define ERTS_DE_QFLG_EXIT (((Uint32) 1) << 1)
+#define ERTS_DE_QFLG_BUSY (((erts_aint32_t) 1) << 0)
+#define ERTS_DE_QFLG_EXIT (((erts_aint32_t) 1) << 1)
+#define ERTS_DE_QFLG_REQ_INFO (((erts_aint32_t) 1) << 2)
+#define ERTS_DE_QFLG_PORT_CTRL (((erts_aint32_t) 1) << 3)
+#define ERTS_DE_QFLG_PROC_CTRL (((erts_aint32_t) 1) << 4)
#define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \
- | ERTS_DE_QFLG_EXIT)
+ | ERTS_DE_QFLG_EXIT \
+ | ERTS_DE_QFLG_REQ_INFO \
+ | ERTS_DE_QFLG_PORT_CTRL \
+ | ERTS_DE_QFLG_PROC_CTRL)
#if defined(ARCH_64)
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL)
@@ -73,14 +92,16 @@
#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713)
#endif
-typedef struct ErtsDistOutputBuf_ ErtsDistOutputBuf;
struct ErtsDistOutputBuf_ {
#ifdef DEBUG
Uint dbg_pattern;
+ byte *alloc_endp;
#endif
ErtsDistOutputBuf *next;
+ Uint hopefull_flags;
byte *extp;
byte *ext_endp;
+ byte *msg_start;
byte data[1];
};
@@ -101,51 +122,46 @@ struct ErtsProcList_;
 * unlock mutexes with higher numbers before mutexes with lower numbers.
*/
-struct erl_link;
-
-typedef struct dist_entry_ {
+struct dist_entry_ {
HashBucket hash_bucket; /* Hash bucket */
struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */
struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */
- erts_refc_t refc; /* Reference count */
- erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */
+ erts_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */
Eterm sysname; /* name@host atom for efficiency */
Uint32 creation; /* creation of connected node */
- Eterm cid; /* connection handler (pid or port), NIL == free */
+ erts_atomic_t input_handler; /* Input handler */
+ Eterm cid; /* connection handler (pid or port),
+ NIL == free */
Uint32 connection_id; /* Connection id incremented on connect */
- Uint32 status; /* Slot status, like exiting reserved etc */
+ enum dist_entry_state state;
Uint32 flags; /* Distribution flags, like hidden,
atom cache etc. */
unsigned long version; /* Protocol version */
+ ErtsMonLnkDist *mld; /* Monitors and links */
- erts_smp_mtx_t lnk_mtx; /* Protects node_links, nlinks, and
- monitors. */
- ErtsLink *node_links; /* In a dist entry, node links are kept
- in a separate tree, while they are
- colocted with the ordinary link tree
- for processes. It's not due to confusion,
- it's because the link tree for the dist
- entry is in two levels, see erl_monitors.h
- */
- ErtsLink *nlinks; /* Link tree with subtrees */
- ErtsMonitor *monitors; /* Monitor tree */
-
- erts_smp_mtx_t qlock; /* Protects qflgs and out_queue */
- Uint32 qflgs;
- Sint qsize;
+ erts_mtx_t qlock; /* Protects qflgs and out_queue */
+ erts_atomic32_t qflgs;
+ erts_atomic_t qsize;
+ erts_atomic64_t in;
+ erts_atomic64_t out;
ErtsDistOutputQueue out_queue;
struct ErtsProcList_ *suspended;
+ ErtsDistOutputQueue tmp_out_queue;
ErtsDistOutputQueue finalized_out_queue;
- erts_smp_atomic_t dist_cmd_scheduled;
+ erts_atomic_t dist_cmd_scheduled;
ErtsPortTaskHandle dist_cmd;
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
struct cache* cache; /* The atom cache */
-} DistEntry;
+
+ ErtsThrPrgrLaterOp later_op;
+
+ struct transcode_context* transcode_ctx;
+};
typedef struct erl_node_ {
HashBucket hash_bucket; /* Hash bucket */
@@ -158,14 +174,16 @@ typedef struct erl_node_ {
extern Hash erts_dist_table;
extern Hash erts_node_table;
-extern erts_smp_rwmtx_t erts_dist_table_rwmtx;
-extern erts_smp_rwmtx_t erts_node_table_rwmtx;
+extern erts_rwmtx_t erts_dist_table_rwmtx;
+extern erts_rwmtx_t erts_node_table_rwmtx;
extern DistEntry *erts_hidden_dist_entries;
extern DistEntry *erts_visible_dist_entries;
+extern DistEntry *erts_pending_dist_entries;
extern DistEntry *erts_not_connected_dist_entries;
extern Sint erts_no_of_hidden_dist_entries;
extern Sint erts_no_of_visible_dist_entries;
+extern Sint erts_no_of_pending_dist_entries;
extern Sint erts_no_of_not_connected_dist_entries;
extern DistEntry *erts_this_dist_entry;
@@ -179,42 +197,37 @@ DistEntry *erts_find_or_insert_dist_entry(Eterm);
DistEntry *erts_find_dist_entry(Eterm);
void erts_schedule_delete_dist_entry(DistEntry *);
Uint erts_dist_table_size(void);
-void erts_dist_table_info(int, void *);
+void erts_dist_table_info(fmtfn_t, void *);
void erts_set_dist_entry_not_connected(DistEntry *);
+void erts_set_dist_entry_pending(DistEntry *);
void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
ErlNode *erts_find_or_insert_node(Eterm, Uint32);
void erts_schedule_delete_node(ErlNode *);
void erts_set_this_node(Eterm, Uint);
Uint erts_node_table_size(void);
void erts_init_node_tables(int);
-void erts_node_table_info(int, void *);
-void erts_print_node_info(int, void *, Eterm, int*, int*);
+void erts_node_table_info(fmtfn_t, void *);
+void erts_print_node_info(fmtfn_t, void *, Eterm, int*, int*);
Eterm erts_get_node_and_dist_references(struct process *);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
+int erts_dist_entry_destructor(Binary *bin);
+DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle, Uint32* connection_id);
+#define ERTS_DHANDLE_SIZE (3+ERTS_MAGIC_REF_THING_SIZE)
+Eterm erts_build_dhandle(Eterm **hpp, ErlOffHeap*, DistEntry*, Uint32 conn_id);
+Eterm erts_make_dhandle(Process *c_p, DistEntry*, Uint32 conn_id);
-ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
-ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_deref_dist_entry(DistEntry *dep)
-{
- ASSERT(dep);
- if (erts_refc_dectest(&dep->refc, 0) == 0)
- erts_schedule_delete_dist_entry(dep);
-}
-
-ERTS_GLB_INLINE void
erts_deref_node_entry(ErlNode *np)
{
ASSERT(np);
@@ -223,43 +236,32 @@ erts_deref_node_entry(ErlNode *np)
}
ERTS_GLB_INLINE void
-erts_smp_de_rlock(DistEntry *dep)
-{
- erts_smp_rwmtx_rlock(&dep->rwmtx);
-}
-
-ERTS_GLB_INLINE void
-erts_smp_de_runlock(DistEntry *dep)
-{
- erts_smp_rwmtx_runlock(&dep->rwmtx);
-}
-
-ERTS_GLB_INLINE void
-erts_smp_de_rwlock(DistEntry *dep)
+erts_de_rlock(DistEntry *dep)
{
- erts_smp_rwmtx_rwlock(&dep->rwmtx);
+ erts_rwmtx_rlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_rwunlock(DistEntry *dep)
+erts_de_runlock(DistEntry *dep)
{
- erts_smp_rwmtx_rwunlock(&dep->rwmtx);
+ erts_rwmtx_runlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_links_lock(DistEntry *dep)
+erts_de_rwlock(DistEntry *dep)
{
- erts_smp_mtx_lock(&dep->lnk_mtx);
+ erts_rwmtx_rwlock(&dep->rwmtx);
}
ERTS_GLB_INLINE void
-erts_smp_de_links_unlock(DistEntry *dep)
+erts_de_rwunlock(DistEntry *dep)
{
- erts_smp_mtx_unlock(&dep->lnk_mtx);
+ erts_rwmtx_rwunlock(&dep->rwmtx);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs);
+void erts_lcnt_update_distribution_locks(int enable);
#endif
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index f90844ccc8..25976d38cc 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,6 +31,9 @@ typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData;
#include "erl_ptab.h"
#include "erl_thr_progress.h"
#include "erl_trace.h"
+#define ERTS_IO_QUEUE_TYPES_ONLY__
+#include "erl_io_queue.h"
+#undef ERTS_IO_QUEUE_TYPES_ONLY__
#ifndef __WIN32__
#define ERTS_DEFAULT_MAX_PORTS (1 << 16)
@@ -75,23 +78,8 @@ typedef struct erts_driver_t_ erts_driver_t;
#define ERTS_Port2ErlDrvPort(PH) ((ErlDrvPort) (PH))
#endif
-#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
+typedef ErtsIOQueue ErlPortIOQueue;
-typedef struct {
- ErlDrvSizeT size; /* total size in bytes */
-
- SysIOVec* v_start;
- SysIOVec* v_end;
- SysIOVec* v_head;
- SysIOVec* v_tail;
- SysIOVec v_small[SMALL_IO_QUEUE];
-
- ErlDrvBinary** b_start;
- ErlDrvBinary** b_end;
- ErlDrvBinary** b_head;
- ErlDrvBinary** b_tail;
- ErlDrvBinary* b_small[SMALL_IO_QUEUE];
-} ErlIOQueue;
typedef struct line_buf { /* Buffer used in line oriented I/O */
ErlDrvSizeT bufsiz; /* Size of character buffer */
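
The removed ErlIOQueue shows the small-buffer optimization that the new ErtsIOQueue generalizes: a fixed v_small/b_small array serves short queues, and a heap allocation takes over beyond SMALL_IO_QUEUE entries. A hedged, iovec-only sketch of that growth strategy (hypothetical names, push-only for brevity):

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define SMALL_IO_QUEUE 5                 /* number of fixed elements */

typedef struct {
    size_t size;                         /* total bytes queued */
    struct iovec *v_start, *v_end;       /* current allocation [start,end) */
    struct iovec *v_head, *v_tail;       /* live elements [head,tail) */
    struct iovec  v_small[SMALL_IO_QUEUE];
} IOQueue;

void ioq_init(IOQueue *q)
{
    q->size = 0;
    q->v_start = q->v_small;
    q->v_end   = q->v_small + SMALL_IO_QUEUE;
    q->v_head  = q->v_tail = q->v_start;
}

int ioq_push(IOQueue *q, struct iovec iov)
{
    if (q->v_tail == q->v_end) {         /* full: spill to (bigger) heap */
        size_t used = (size_t)(q->v_tail - q->v_head);
        size_t cap  = 2 * (size_t)(q->v_end - q->v_start);
        struct iovec *nv = malloc(cap * sizeof *nv);
        if (!nv)
            return -1;
        memcpy(nv, q->v_head, used * sizeof *nv);
        if (q->v_start != q->v_small)    /* never free the inline array */
            free(q->v_start);
        q->v_start = nv;  q->v_end  = nv + cap;
        q->v_head  = nv;  q->v_tail = nv + used;
    }
    *q->v_tail++ = iov;
    q->size += iov.iov_len;
    return 0;
}
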
@@ -124,16 +112,16 @@ typedef struct line_buf { /* Buffer used in line oriented I/O */
*/
#define ERTS_PRTSD_SCHED_ID 0
+#define ERTS_PRTSD_DIST_ENTRY 1
+#define ERTS_PRTSD_CONN_ID 2
-#define ERTS_PRTSD_SIZE 1
+#define ERTS_PRTSD_SIZE 3
typedef struct {
void *data[ERTS_PRTSD_SIZE];
} ErtsPrtSD;
-#ifdef ERTS_SMP
typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
/*
* Port locking:
@@ -158,22 +146,16 @@ struct _erl_drv_port {
ErtsPortTaskSched sched;
ErtsPortTaskHandle timeout_task;
-#ifdef ERTS_SMP
erts_mtx_t *lock;
ErtsXPortsList *xports;
- erts_smp_atomic_t run_queue;
-#else
- erts_atomic32_t refc;
- int cleanup;
-#endif
+ erts_atomic_t run_queue;
erts_atomic_t connected; /* A connected process */
Eterm caller; /* Current caller. */
- erts_smp_atomic_t data; /* Data associated with port. */
+ erts_atomic_t data; /* Data associated with port. */
Uint bytes_in; /* Number of bytes read */
Uint bytes_out; /* Number of bytes written */
- ErlIOQueue ioq; /* driver accessible i/o queue */
- DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
+ ErlPortIOQueue ioq; /* driver accessible i/o queue */
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
UWord drv_data;
@@ -185,7 +167,7 @@ struct _erl_drv_port {
int control_flags; /* Flags for port_control() */
ErlDrvPDL port_data_lock;
- erts_smp_atomic_t psd; /* Port specific data */
+ erts_atomic_t psd; /* Port specific data */
int reds; /* Only used while executing driver callbacks */
struct {
@@ -199,6 +181,7 @@ void erts_init_port_data(Port *);
void erts_cleanup_port_data(Port *);
Uint erts_port_data_size(Port *);
ErlOffHeap *erts_port_data_offheap(Port *);
+Eterm erts_port_data_read(Port* prt);
#define ERTS_PORT_GET_CONNECTED(PRT) \
((Eterm) erts_atomic_read_nob(&(PRT)->connected))
@@ -214,31 +197,53 @@ struct erl_drv_port_data_lock {
Port *prt;
};
+ERTS_GLB_INLINE void erts_init_runq_port(Port *prt, ErtsRunQueue *runq);
+ERTS_GLB_INLINE void erts_set_runq_port(Port *prt, ErtsRunQueue *runq);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_port(Port *prt);
ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void
+erts_init_runq_port(Port *prt, ErtsRunQueue *runq)
+{
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_atomic_init_nob(&prt->run_queue, (erts_aint_t) runq);
+}
+
+ERTS_GLB_INLINE void
+erts_set_runq_port(Port *prt, ErtsRunQueue *runq)
+{
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
+}
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_get_runq_port(Port *prt)
+{
+ ErtsRunQueue *runq;
+ runq = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ return runq;
+}
+
+
ERTS_GLB_INLINE ErtsRunQueue *
erts_port_runq(Port *prt)
{
-#ifdef ERTS_SMP
ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
- if (!rq1)
- return NULL;
+ rq1 = erts_get_runq_port(prt);
while (1) {
- erts_smp_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ erts_runq_lock(rq1);
+ rq2 = erts_get_runq_port(prt);
if (rq1 == rq2)
return rq1;
- erts_smp_runq_unlock(rq1);
+ erts_runq_unlock(rq1);
rq1 = rq2;
- if (!rq1)
- return NULL;
}
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
#endif
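
erts_port_runq above locks a run queue and then re-reads the port's run_queue pointer to detect emigration that happened while the lock was being taken, retrying on the new queue. A standalone sketch of that lock-and-revalidate loop with pthreads and C11 atomics (placeholder types, not ERTS code):

#include <pthread.h>
#include <stdatomic.h>

typedef struct RunQueue { pthread_mutex_t lock; } RunQueue;
typedef struct { _Atomic(RunQueue *) run_queue; } Port;

RunQueue *port_runq(Port *p)
{
    RunQueue *rq = atomic_load_explicit(&p->run_queue, memory_order_relaxed);
    for (;;) {
        pthread_mutex_lock(&rq->lock);
        RunQueue *now = atomic_load_explicit(&p->run_queue,
                                             memory_order_relaxed);
        if (now == rq)
            return rq;                  /* still the owner; lock held on return */
        pthread_mutex_unlock(&rq->lock);
        rq = now;                       /* port emigrated; chase the new queue */
    }
}
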
@@ -252,10 +257,12 @@ ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
ERTS_GLB_INLINE void *
erts_prtsd_get(Port *prt, int ix)
{
- ErtsPrtSD *psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
+ ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+
+ ASSERT((unsigned)ix < ERTS_PRTSD_SIZE);
if (!psd)
return NULL;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
return psd->data[ix];
}
@@ -266,16 +273,15 @@ erts_prtsd_set(Port *prt, int ix, void *data)
void *old;
int i;
- psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
+ psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
+ ASSERT((unsigned)ix < ERTS_PRTSD_SIZE);
if (psd) {
-#ifdef ERTS_SMP
#ifdef ETHR_ORDERED_READ_DEPEND
ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
#else
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
#endif
-#endif
old = psd->data[ix];
psd->data[ix] = data;
return old;
@@ -287,7 +293,7 @@ erts_prtsd_set(Port *prt, int ix, void *data)
new_psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
for (i = 0; i < ERTS_PRTSD_SIZE; i++)
new_psd->data[i] = NULL;
- psd = (ErtsPrtSD *) erts_smp_atomic_cmpxchg_mb(&prt->psd,
+ psd = (ErtsPrtSD *) erts_atomic_cmpxchg_mb(&prt->psd,
(erts_aint_t) new_psd,
(erts_aint_t) NULL);
if (psd)
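
erts_prtsd_set installs the per-port data table lazily with a full-barrier compare-and-swap, so concurrent first writers race safely and the loser frees its allocation. A minimal sketch of that one-shot install, assuming hypothetical Port/PrtSD types:

#include <stdatomic.h>
#include <stdlib.h>

#define PRTSD_SIZE 3

typedef struct { void *data[PRTSD_SIZE]; } PrtSD;
typedef struct { _Atomic(PrtSD *) psd; } Port;

/* Returns the port's PSD table, installing one on first use. */
PrtSD *prtsd_table(Port *prt)
{
    PrtSD *psd = atomic_load_explicit(&prt->psd, memory_order_acquire);
    if (psd)
        return psd;
    PrtSD *fresh = calloc(1, sizeof *fresh);
    if (!fresh)
        return NULL;
    PrtSD *expected = NULL;
    if (atomic_compare_exchange_strong(&prt->psd, &expected, fresh))
        return fresh;       /* we installed it */
    free(fresh);            /* lost the race; use the winner's table */
    return expected;        /* cmpxchg stored the current value here */
}
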
@@ -328,6 +334,8 @@ Eterm erts_request_io_bytes(Process *c_p);
#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11))
/* Last port to terminate halts the emulator */
#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12))
+/* Check whether the event in ready_input should be cleaned up */
+#define ERTS_PORT_SFLG_CHECK_FD_CLEANUP ((Uint32) (1 << 13))
#ifdef DEBUG
/* Only debug: make sure all flags aren't cleared unintentionally */
#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
@@ -369,17 +377,12 @@ Eterm erts_request_io_bytes(Process *c_p);
#define ERTS_PORT_REDS_INFO (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_TERMINATE (CONTEXT_REDS/50)
-void print_port_info(Port *, int, void *);
+void print_port_info(Port *, fmtfn_t, void *);
void erts_port_free(Port *);
-#ifndef ERTS_SMP
-void erts_port_cleanup(Port *);
-#endif
-void erts_fire_port_monitor(Port *prt, Eterm ref);
-#ifdef ERTS_SMP
+void erts_fire_port_monitor(Port *prt, ErtsMonitor *tmon);
int erts_port_handle_xports(Port *);
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_lc_is_port_locked(Port *);
#endif
@@ -388,9 +391,9 @@ ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt);
-ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
+ERTS_GLB_INLINE int erts_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_port_unlock(Port *prt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -419,35 +422,27 @@ ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt)
}
ERTS_GLB_INLINE int
-erts_smp_port_trylock(Port *prt)
+erts_port_trylock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
return erts_mtx_trylock(prt->lock);
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_port_lock(Port *prt)
+erts_port_lock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_lock(prt->lock);
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_port_unlock(Port *prt)
+erts_port_unlock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_unlock(prt->lock);
-#endif
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -470,7 +465,7 @@ erts_smp_port_unlock(Port *prt)
ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
#define ERTS_PORT_SCHED_ID(P, ID) \
- ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
+ ((Uint) (UWord) erts_prtsd_set((P), ERTS_PRTSD_SCHED_ID, (void *) (UWord) (ID)))
extern const Port erts_invalid_port;
#define ERTS_PORT_LOCK_BUSY ((Port *) &erts_invalid_port)
@@ -478,9 +473,7 @@ extern const Port erts_invalid_port;
int erts_is_port_ioq_empty(Port *);
void erts_terminate_port(Port *);
-#ifdef ERTS_SMP
Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
ERTS_GLB_INLINE Port *erts_pix2port(int);
ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
@@ -488,11 +481,9 @@ ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
ERTS_GLB_INLINE void erts_port_release(Port *);
-#ifdef ERTS_SMP
ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
-#endif
ERTS_GLB_INLINE Port *erts_thr_drvport2port(ErlDrvPort, int);
ERTS_GLB_INLINE Port *erts_drvport2port_state(ErlDrvPort, erts_aint32_t *);
ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
@@ -500,6 +491,8 @@ ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm);
ERTS_GLB_INLINE int erts_is_port_alive(Eterm);
ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm);
ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *);
+ERTS_GLB_INLINE Port *erts_get_current_port(void);
+ERTS_GLB_INLINE Eterm erts_get_current_port_id(void);
#define erts_drvport2port(Prt) erts_drvport2port_state((Prt), NULL)
@@ -518,7 +511,7 @@ erts_port_lookup_raw(Eterm id)
{
Port *prt;
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_port(id))
return NULL;
@@ -547,7 +540,7 @@ erts_id2port(Eterm id)
Port *prt;
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
if (is_not_internal_port(id))
return NULL;
@@ -558,10 +551,10 @@ erts_id2port(Eterm id)
if (!prt || prt->common.id != id)
return NULL;
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) {
- erts_smp_port_unlock(prt);
+ erts_port_unlock(prt);
return NULL;
}
@@ -574,14 +567,12 @@ erts_id2port_sflgs(Eterm id,
Process *c_p, ErtsProcLocks c_p_locks,
Uint32 invalid_sflgs)
{
-#ifdef ERTS_SMP
int no_proc_locks = !c_p || !c_p_locks;
-#endif
erts_aint32_t state;
Port *prt;
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
if (is_not_internal_port(id))
return NULL;
@@ -592,21 +583,17 @@ erts_id2port_sflgs(Eterm id,
if (!prt || prt->common.id != id)
return NULL;
-#ifdef ERTS_SMP
if (no_proc_locks)
- erts_smp_port_lock(prt);
- else if (erts_smp_port_trylock(prt) == EBUSY) {
+ erts_port_lock(prt);
+ else if (erts_port_trylock(prt) == EBUSY) {
/* Unlock process locks, and acquire locks in lock order... */
- erts_smp_proc_unlock(c_p, c_p_locks);
- erts_smp_port_lock(prt);
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_unlock(c_p, c_p_locks);
+ erts_port_lock(prt);
+ erts_proc_lock(c_p, c_p_locks);
}
-#endif
state = erts_atomic32_read_nob(&prt->state);
if (state & invalid_sflgs) {
-#ifdef ERTS_SMP
- erts_smp_port_unlock(prt);
-#endif
+ erts_port_unlock(prt);
return NULL;
}
@@ -617,18 +604,10 @@ ERTS_GLB_INLINE void
erts_port_release(Port *prt)
{
/* Only allowed to be called from managed threads */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
-#ifdef ERTS_SMP
- erts_smp_port_unlock(prt);
-#else
- if (prt->cleanup) {
- prt->cleanup = 0;
- erts_port_cleanup(prt);
- }
-#endif
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_port_unlock(prt);
}
-#ifdef ERTS_SMP
/*
* erts_thr_id2port_sflgs() and erts_port_dec_refc(prt) can
* be used by unmanaged threads in the SMP case.
@@ -714,13 +693,10 @@ ERTS_GLB_INLINE void
erts_thr_port_release(Port *prt)
{
erts_mtx_unlock(prt->lock);
-#ifdef ERTS_SMP
if (!erts_thr_progress_is_managed_thread())
erts_port_dec_refc(prt);
-#endif
}
-#endif
ERTS_GLB_INLINE Port *
erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
@@ -733,10 +709,10 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
if (lock_pdl && prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
-#if ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_ENABLE_LOCK_CHECK
if (!ERTS_IS_CRASH_DUMPING) {
if (erts_lc_is_emu_thr()) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_LC_ASSERT(!prt->port_data_lock
|| erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
}
@@ -765,7 +741,7 @@ erts_drvport2port_state(ErlDrvPort drvport, erts_aint32_t *statep)
// ERTS_LC_ASSERT(erts_lc_is_emu_thr());
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return ERTS_INVALID_ERL_DRV_PORT;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
/*
* This state check is only needed since a driver callback
@@ -822,23 +798,21 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
int reds = 0;
erts_aint32_t state;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
state = erts_atomic32_read_nob(&prt->state);
if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) {
reds += ERTS_PORT_REDS_TERMINATE;
erts_terminate_port(prt);
state = erts_atomic32_read_nob(&prt->state);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
}
-#ifdef ERTS_SMP
if (prt->xports) {
reds += erts_port_handle_xports(prt);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(!prt->xports);
}
-#endif
if (statep)
*statep = state;
@@ -846,6 +820,20 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
return reds;
}
+ERTS_GLB_INLINE
+Port *erts_get_current_port(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ return esdp ? esdp->current_port : NULL;
+}
+
+ERTS_GLB_INLINE
+Eterm erts_get_current_port_id(void)
+{
+ Port *port = erts_get_current_port();
+ return port ? port->common.id : THE_NON_VALUE;
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
void erts_port_resume_procs(Port *);
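
The new erts_get_current_port/erts_get_current_port_id accessors read the executing port out of the scheduler's own data, falling back to NULL and THE_NON_VALUE off-scheduler. A rough thread-local sketch of the same idea (all names and the sentinel value are placeholders):

#include <stddef.h>

typedef unsigned long Eterm;
#define THE_NON_VALUE ((Eterm)0)         /* placeholder sentinel */

typedef struct Port { Eterm id; } Port;
typedef struct { Port *current_port; } SchedulerData;

static _Thread_local SchedulerData *tls_esdp; /* set by the scheduler loop */

Port *get_current_port(void)
{
    SchedulerData *esdp = tls_esdp;
    return esdp ? esdp->current_port : NULL;
}

Eterm get_current_port_id(void)
{
    Port *prt = get_current_port();
    return prt ? prt->id : THE_NON_VALUE;
}
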
@@ -921,20 +909,20 @@ struct ErtsProc2PortSigData_ {
Eterm item;
} info;
struct {
- Eterm port;
- Eterm to;
+ Eterm port_id;
+ ErtsLink *lnk;
} link;
struct {
- Eterm from;
+ Eterm port_id;
+ ErtsLink *lnk;
} unlink;
struct {
- Eterm origin; /* who receives monitor event, pid */
- Eterm name; /* either name for named monitor, or port id */
+ Eterm port_id;
+ ErtsMonitor *mon;
} monitor;
struct {
- Eterm origin; /* who is at the other end of the monitor, pid */
- Eterm name; /* port id */
- Uint32 ref[ERTS_MAX_REF_NUMBERS]; /* box contents of a ref */
+ Eterm port_id;
+ ErtsMonitor *mon;
} demonitor;
} u;
} ;
@@ -980,7 +968,6 @@ typedef int (*ErtsProc2PortSigCallback)(Port *,
typedef enum {
ERTS_PORT_OP_BADARG,
- ERTS_PORT_OP_CALLER_EXIT,
ERTS_PORT_OP_BUSY,
ERTS_PORT_OP_BUSY_SCHEDULED,
ERTS_PORT_OP_SCHEDULED,
@@ -988,16 +975,6 @@ typedef enum {
ERTS_PORT_OP_DONE
} ErtsPortOpResult;
-ErtsPortOpResult
-erts_schedule_proc2port_signal(Process *,
- Port *,
- Eterm,
- Eterm *,
- ErtsProc2PortSigData *,
- int,
- ErtsPortTaskHandle *,
- ErtsProc2PortSigCallback);
-
int erts_deliver_port_exit(Port *, Eterm, Eterm, int, int);
/*
@@ -1026,32 +1003,13 @@ ErtsPortOpResult erts_port_command(Process *, int, Port *, Eterm, Eterm *);
ErtsPortOpResult erts_port_output(Process *, int, Port *, Eterm, Eterm, Eterm *);
ErtsPortOpResult erts_port_exit(Process *, int, Port *, Eterm, Eterm, Eterm *);
ErtsPortOpResult erts_port_connect(Process *, int, Port *, Eterm, Eterm, Eterm *);
-ErtsPortOpResult erts_port_link(Process *, Port *, Eterm, Eterm *);
-ErtsPortOpResult erts_port_unlink(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_link(Process *, Port *, ErtsLink *, Eterm *);
+ErtsPortOpResult erts_port_unlink(Process *, Port *, ErtsLink *, Eterm *);
ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
-
-/* Creates monitor between Origin and Target. Ref must be initialized to
- * a reference (ref may be rewritten to be used to serve additionally as a
- * signal id). Name is atom if user monitors port by name or NIL */
-ErtsPortOpResult erts_port_monitor(Process *origin, Port *target, Eterm name,
- Eterm *ref);
-
-typedef enum {
- /* Normal demonitor rules apply with locking and reductions bump */
- ERTS_PORT_DEMONITOR_NORMAL = 1,
- /* Relaxed demonitor rules when process is about to die, which means that
- * pid lookup won't work, locks won't work, no reductions bump. */
- ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED = 2,
-} ErtsDemonitorMode;
-
-/* Removes monitor between origin and target, identified by ref.
- * origin_is_dying can be 0 (false, normal locking rules and reductions bump
- * apply) or 1 (true, in case when we avoid origin locking) */
-ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
- Port *target, Eterm ref,
- Eterm *trap_ref);
+ErtsPortOpResult erts_port_monitor(Process *, Port *, ErtsMonitor *);
+ErtsPortOpResult erts_port_demonitor(Process *, Port *, ErtsMonitor *);
/* defined in erl_bif_port.c */
Port *erts_sig_lookup_port(Process *c_p, Eterm id_or_name);
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 3102e44c11..c8f2e88127 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@
#include "erl_check_io.h"
#include "dtrace-wrapper.h"
#include "lttng-wrapper.h"
+#include "erl_check_io.h"
#include <stdarg.h>
/*
@@ -83,15 +84,12 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0)
#endif
-#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
- do { \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
- ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
- erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+#define ERTS_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \
+ ERTS_LC_ASSERT((RQ) == erts_get_runq_port((PP))); \
} while (0)
-erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-
#define ERTS_PT_STATE_SCHEDULED 0
#define ERTS_PT_STATE_ABORTED 1
#define ERTS_PT_STATE_EXECUTING 2
@@ -99,7 +97,9 @@ erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
typedef union {
struct { /* I/O tasks */
ErlDrvEvent event;
- ErlDrvEventData event_data;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ int is_scheduler_event;
+#endif
} io;
struct {
ErtsProc2PortSigCallback callback;
@@ -108,7 +108,7 @@ typedef union {
} ErtsPortTaskTypeData;
struct ErtsPortTask_ {
- erts_smp_atomic32_t state;
+ erts_atomic32_t state;
ErtsPortTaskType type;
union {
struct {
@@ -126,9 +126,7 @@ struct ErtsPortTaskHandleList_ {
ErtsPortTaskHandle handle;
union {
ErtsPortTaskHandleList *next;
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp release;
-#endif
} u;
};
@@ -146,40 +144,37 @@ struct ErtsPortTaskBusyCallerTable_ {
ErtsPortTaskBusyCaller pre_alloc_busy_caller;
};
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+erts_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
static void begin_port_cleanup(Port *pp,
ErtsPortTask **execq,
int *processing_busy_q_p);
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
- ErtsPortTask,
- 1000,
- ERTS_ALC_T_PORT_TASK)
+ERTS_THR_PREF_QUICK_ALLOC_IMPL(port_task,
+ ErtsPortTask,
+ 1000,
+ ERTS_ALC_T_PORT_TASK)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
ErtsPortTaskBusyCallerTable,
50,
ERTS_ALC_T_BUSY_CALLER_TAB)
-#ifdef ERTS_SMP
static void
call_port_task_free(void *vptp)
{
port_task_free((ErtsPortTask *) vptp);
}
-#endif
static ERTS_INLINE void
schedule_port_task_free(ErtsPortTask *ptp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
(void *) ptp,
&ptp->u.release,
sizeof(ErtsPortTask));
-#else
- port_task_free(ptp);
-#endif
}
static ERTS_INLINE ErtsPortTask *
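
schedule_port_task_free cannot free a task immediately: an unmanaged thread may still be dereferencing its handle, so the free is parked as a thread-progress later-op and run once all schedulers are known to have made progress. A deliberately simplified, single-threaded sketch of such a deferral queue (hypothetical names, no real thread-progress machinery):

#include <stdlib.h>

typedef struct Deferred {
    void (*fn)(void *);
    void *arg;
    struct Deferred *next;
} Deferred;

static Deferred *later_ops;              /* drained at a quiescent point */

void schedule_later(void (*fn)(void *), void *arg)
{
    Deferred *d = malloc(sizeof *d);
    d->fn = fn;
    d->arg = arg;
    d->next = later_ops;
    later_ops = d;
}

/* Called once every scheduler has passed a progress point. */
void run_later_ops(void)
{
    while (later_ops) {
        Deferred *d = later_ops;
        later_ops = d->next;
        d->fn(d->arg);
        free(d);
    }
}
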
@@ -199,7 +194,7 @@ p2p_sig_data_init(ErtsPortTask *ptp)
ptp->type = ERTS_PORT_TASK_PROC_SIG;
ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP;
- erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+ erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data));
@@ -290,7 +285,7 @@ popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last)
#ifdef DEBUG
erts_aint32_t flags =
#endif
- erts_smp_atomic32_read_band_nob(
+ erts_atomic32_read_band_nob(
&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -337,7 +332,7 @@ busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
#ifdef DEBUG
flags =
#endif
- erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ erts_atomic32_read_bor_nob(&pp->sched.flags,
ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
@@ -477,7 +472,7 @@ no_sig_dep_move_from_busyq(Port *pp)
int bix;
erts_aint32_t flags =
#endif
- erts_smp_atomic32_read_band_nob(
+ erts_atomic32_read_band_nob(
&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -510,11 +505,11 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
if (!first) {
ASSERT(!tabp);
ASSERT(!pp->sched.taskq.local.busy.last);
- ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+ ASSERT(!(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
return;
}
- ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
ASSERT(tabp);
tot_count = 0;
@@ -570,13 +565,13 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
static ERTS_INLINE void
reset_port_task_handle(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL);
+ erts_atomic_set_relb(pthp, (erts_aint_t) NULL);
}
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
- return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp);
+ return (ErtsPortTask *) erts_atomic_read_acqb(pthp);
}
static ERTS_INLINE void
@@ -589,12 +584,26 @@ reset_handle(ErtsPortTask *ptp)
}
static ERTS_INLINE void
-reset_executed_io_task_handle(ErtsPortTask *ptp)
+reset_executed_io_task_handle(Port *prt, ErtsPortTask *ptp)
{
if (ptp->u.alive.handle) {
ASSERT(ptp == handle2task(ptp->u.alive.handle));
- erts_io_notify_port_task_executed(ptp->u.alive.handle);
- reset_port_task_handle(ptp->u.alive.handle);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event) {
+ if ((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLG_CHECK_FD_CLEANUP)) {
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ erts_atomic32_read_band_nob(&prt->state, ~ERTS_PORT_SFLG_CHECK_FD_CLEANUP);
+ } else {
+ reset_port_task_handle(ptp->u.alive.handle);
+ }
+ } else
+#endif
+ {
+ /* The port task handle is reset inside task_executed */
+ erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+ reset_port_task_handle);
+ }
}
}
@@ -603,7 +612,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->u.alive.handle = pthp;
if (pthp) {
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
ASSERT(ptp == handle2task(ptp->u.alive.handle));
}
}
@@ -617,7 +626,7 @@ set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
* IMPORTANT! Task either need to be aborted, or task handle
* need to be detached before thread progress has been made.
*/
- erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
}
}
@@ -635,20 +644,20 @@ check_unset_busy_port_q(Port *pp,
int resume_procs = 0;
ASSERT(bpq);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
erts_port_task_sched_lock(&pp->sched);
- qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
- low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ qsize = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
+ low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
if (qsize < low) {
erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
| ERTS_PTS_FLG_BUSY_PORT_Q);
- flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask);
+ flags = erts_atomic32_read_band_relb(&pp->sched.flags, mask);
if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
resume_procs = 1;
}
else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) {
- flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags,
+ flags = erts_atomic32_read_band_relb(&pp->sched.flags,
~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
}
@@ -673,16 +682,16 @@ aborted_proc2port_data(Port *pp, ErlDrvSizeT size)
bpq = pp->sched.taskq.bpq;
- qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) -size);
ASSERT(qsz + size > qsz);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
ASSERT(pp->sched.taskq.bpq);
if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
| ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q)
return;
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low))
- erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low))
+ erts_atomic32_read_bor_nob(&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
}
@@ -700,13 +709,13 @@ dequeued_proc2port_data(Port *pp, ErlDrvSizeT size)
bpq = pp->sched.taskq.bpq;
- qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) -size);
ASSERT(qsz + size > qsz);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
return;
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low))
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->low))
check_unset_busy_port_q(pp, flags, bpq);
}
@@ -719,19 +728,19 @@ enqueue_proc2port_data(Port *pp,
if (sigdp && bpq) {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
if (size) {
- erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size,
+ erts_aint_t asize = erts_atomic_add_read_acqb(&bpq->size,
(erts_aint_t) size);
ErlDrvSizeT qsz = (ErlDrvSizeT) asize;
ASSERT(qsz - size < qsz);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) {
- flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags,
+ flags = erts_atomic32_read_bor_acqb(&pp->sched.flags,
ERTS_PTS_FLG_BUSY_PORT_Q);
flags |= ERTS_PTS_FLG_BUSY_PORT_Q;
- qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size);
- if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) {
- flags = (erts_smp_atomic32_read_bor_relb(
+ qsz = (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->size);
+ if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) {
+ flags = (erts_atomic32_read_bor_relb(
&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q));
flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
@@ -779,18 +788,18 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
erts_aint32_t flags;
pp->sched.taskq.bpq = NULL;
flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
- flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags);
+ flags = erts_atomic32_read_band_acqb(&pp->sched.flags, flags);
if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
resume_procs = 1;
}
else {
if (!low)
- low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
else {
if (bpq->high < low)
bpq->high = low;
- erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
written = 1;
}
@@ -799,19 +808,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
else {
if (low > high) {
low = high;
- erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
}
bpq->high = high;
written = 1;
}
if (written) {
- ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ ErlDrvSizeT size = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
if (size > high)
- erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_BUSY_PORT_Q);
else if (size < low)
- erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
}
}
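
The busy-port-queue hunks above implement watermark hysteresis: the BUSY flag is raised when queued bytes exceed the high mark and only cleared once they fall below the low mark, so producers are not suspended and resumed on every byte. A single-threaded sketch of that accounting (the real code does it with atomic read-modify-write flag operations):

#include <stdbool.h>

typedef struct {
    long low, high;     /* watermarks, in bytes */
    long size;          /* bytes currently queued */
    bool busy;
} BusyQ;

/* delta > 0 on enqueue, delta < 0 on dequeue/abort. */
void bq_account(BusyQ *q, long delta)
{
    q->size += delta;
    if (!q->busy && q->size > q->high)
        q->busy = true;          /* suspend producers */
    else if (q->busy && q->size < q->low)
        q->busy = false;         /* resume suspended producers */
}
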
@@ -830,32 +839,27 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
* No-suspend handles.
*/
-#ifdef ERTS_SMP
static void
free_port_task_handle_list(void *vpthlp)
{
erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
}
-#endif
static void
schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list,
(void *) pthlp,
&pthlp->u.release,
sizeof(ErtsPortTaskHandleList));
-#else
- erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
-#endif
}
static ERTS_INLINE void
-abort_nosuspend_task(Port *pp,
- ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp,
- int bpq_data)
+abort_signal_task(Port *pp,
+ int abort_type,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
@@ -863,24 +867,34 @@ abort_nosuspend_task(Port *pp,
if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
else {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
aborted_proc2port_data(pp, size);
}
}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
+{
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, type, tdp, bpq_data);
+}
+
static ErtsPortTaskHandleList *
get_free_nosuspend_handles(Port *pp)
{
ErtsPortTaskHandleList *nshp, *last_nshp = NULL;
- ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
+ ERTS_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
nshp = pp->sched.taskq.local.busy.nosuspend;
@@ -896,7 +910,7 @@ get_free_nosuspend_handles(Port *pp)
pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next;
last_nshp->u.next = NULL;
if (!pp->sched.taskq.local.busy.nosuspend)
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_NS_TASKS);
}
return nshp;
@@ -919,7 +933,7 @@ free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp)
static ERTS_INLINE void
enqueue_port(ErtsRunQueue *runq, Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
pp->sched.next = NULL;
if (runq->ports.end) {
ASSERT(runq->ports.start);
@@ -933,19 +947,17 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
runq->ports.end = pp;
ASSERT(runq->ports.start && runq->ports.end);
- erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+ erts_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
-#ifdef ERTS_SMP
- if (runq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
erts_non_empty_runq(runq);
-#endif
}
static ERTS_INLINE Port *
pop_port(ErtsRunQueue *runq)
{
Port *pp = runq->ports.start;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
if (!pp) {
ASSERT(!runq->ports.end);
}
@@ -955,7 +967,7 @@ pop_port(ErtsRunQueue *runq)
ASSERT(runq->ports.end == pp);
runq->ports.end = NULL;
}
- erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+ erts_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
}
ASSERT(runq->ports.start || !runq->ports.end);
@@ -982,7 +994,7 @@ enqueue_task(Port *pp,
if (ns_pthlp)
fail_flags |= ERTS_PTS_FLG_BUSY_PORT;
erts_port_task_sched_lock(&pp->sched);
- flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ flags = erts_atomic32_read_nob(&pp->sched.flags);
if (flags & fail_flags)
res = 0;
else {
@@ -1013,7 +1025,7 @@ enqueue_task(Port *pp,
static ERTS_INLINE void
prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ erts_aint32_t act = erts_atomic32_read_nob(&pp->sched.flags);
if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) {
*execqp = pp->sched.taskq.local.first;
@@ -1034,7 +1046,7 @@ prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
new &= ~ERTS_PTS_FLG_IN_RUNQ;
new |= ERTS_PTS_FLG_EXEC;
- act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
ASSERT(act & ERTS_PTS_FLG_IN_RUNQ);
@@ -1061,7 +1073,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
*execq = NULL;
- act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ act = erts_atomic32_read_nob(&pp->sched.flags);
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
@@ -1078,7 +1090,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_HAVE_TASKS)
new |= ERTS_PTS_FLG_IN_RUNQ;
- act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
@@ -1104,7 +1116,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
static ERTS_INLINE erts_aint32_t
select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ erts_aint32_t flags = erts_atomic32_read_nob(&pp->sched.flags);
if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq);
@@ -1214,7 +1226,7 @@ fetch_in_queue(Port *pp, ErtsPortTask **execqp)
if (ptp)
*execqp = ptp->u.alive.next;
else
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_TASKS);
@@ -1277,7 +1289,7 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent)
void
erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp)
{
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
reset_port_task_handle(pthp);
}
@@ -1290,9 +1302,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
{
int res;
ErtsPortTask *ptp;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
ptp = handle2task(pthp);
if (!ptp)
@@ -1302,41 +1312,41 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
#ifdef DEBUG
ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle;
- ERTS_SMP_READ_MEMORY_BARRIER;
- old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ old_state = erts_atomic32_read_nob(&ptp->state);
if (old_state == ERTS_PT_STATE_SCHEDULED) {
ASSERT(!saved_pthp || saved_pthp == pthp);
}
#endif
- old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ old_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_ABORTED,
ERTS_PT_STATE_SCHEDULED);
if (old_state != ERTS_PT_STATE_SCHEDULED)
res = - 1; /* Task already aborted, executing, or executed */
else {
-
reset_port_task_handle(pthp);
- switch (ptp->type) {
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ switch (ptp->type) {
case ERTS_PORT_TASK_INPUT:
case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_smp_atomic_read_nob(
- &erts_port_task_outstanding_io_tasks) > 0);
- erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ if (ptp->u.alive.td.io.is_scheduler_event) {
+ ASSERT(erts_atomic_read_nob(
+ &erts_port_task_outstanding_io_tasks) > 0);
+ erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ }
break;
default:
break;
}
+#endif
res = 0;
}
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
return res;
}
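
erts_port_task_abort above resolves the abort/execute race with one compare-and-swap on the task state: SCHEDULED moves to exactly one of ABORTED or EXECUTING. A compact sketch of that protocol with C11 atomics (names are illustrative):

#include <stdatomic.h>

enum { PT_SCHEDULED, PT_ABORTED, PT_EXECUTING };

typedef struct { atomic_int state; } PortTask;

/* Returns 0 on successful abort, -1 if the task already ran or aborted. */
int task_abort(PortTask *t)
{
    int expected = PT_SCHEDULED;
    if (atomic_compare_exchange_strong(&t->state, &expected, PT_ABORTED))
        return 0;
    return -1;      /* lost the race: executing, executed, or aborted */
}

/* The executor claims the task the same way before running it. */
int task_claim_for_exec(PortTask *t)
{
    int expected = PT_SCHEDULED;
    return atomic_compare_exchange_strong(&t->state, &expected, PT_EXECUTING)
           ? 0 : -1;
}
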
@@ -1345,12 +1355,10 @@ void
erts_port_task_abort_nosuspend_tasks(Port *pp)
{
ErtsPortTaskHandleList *abort_list;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID;
-#endif
erts_port_task_sched_lock(&pp->sched);
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~ERTS_PTS_FLG_HAVE_NS_TASKS);
abort_list = pp->sched.taskq.local.busy.nosuspend;
pp->sched.taskq.local.busy.nosuspend = NULL;
@@ -1370,40 +1378,34 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
pthlp = abort_list;
abort_list = pthlp->u.next;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pthp = &pthlp->handle;
ptp = handle2task(pthp);
if (!ptp) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
#ifdef DEBUG
saved_pthp = ptp->u.alive.handle;
- ERTS_SMP_READ_MEMORY_BARRIER;
- old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ ERTS_THR_READ_MEMORY_BARRIER;
+ old_state = erts_atomic32_read_nob(&ptp->state);
if (old_state == ERTS_PT_STATE_SCHEDULED) {
ASSERT(saved_pthp == pthp);
}
#endif
- old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ old_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_ABORTED,
ERTS_PT_STATE_SCHEDULED);
if (old_state != ERTS_PT_STATE_SCHEDULED) {
/* Task already aborted, executing, or executed */
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
@@ -1413,10 +1415,8 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
type = ptp->type;
td = ptp->u.alive.td;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
@@ -1435,10 +1435,8 @@ erts_port_task_schedule(Eterm id,
{
ErtsProc2PortSigData *sigdp = NULL;
ErtsPortTaskHandleList *ns_pthlp = NULL;
-#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
ErtsThrPrgrDelayHandle dhndl;
-#endif
ErtsRunQueue *runq;
Port *pp;
ErtsPortTask *ptp = NULL;
@@ -1449,30 +1447,26 @@ erts_port_task_schedule(Eterm id,
ASSERT(is_internal_port(id));
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pp = erts_port_lookup_raw(id);
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
if (pp)
erts_port_inc_refc(pp);
erts_thr_progress_unmanaged_continue(dhndl);
}
-#endif
-
- if (!pp)
- goto fail;
if (type != ERTS_PORT_TASK_PROC_SIG) {
+ if (!pp)
+ goto fail;
+
ptp = port_task_alloc();
ptp->type = type;
ptp->u.alive.flags = 0;
- erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+ erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
set_handle(ptp, pthp);
}
@@ -1483,17 +1477,14 @@ erts_port_task_schedule(Eterm id,
va_list argp;
va_start(argp, type);
ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ptp->u.alive.td.io.is_scheduler_event = va_arg(argp, int);
+#endif
va_end(argp);
- erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
- break;
- }
- case ERTS_PORT_TASK_EVENT: {
- va_list argp;
- va_start(argp, type);
- ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
- ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData);
- va_end(argp);
- erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+#endif
break;
}
case ERTS_PORT_TASK_PROC_SIG: {
@@ -1504,6 +1495,9 @@ erts_port_task_schedule(Eterm id,
ptp->u.alive.td.psig.callback = va_arg(argp, ErtsProc2PortSigCallback);
ptp->u.alive.flags |= va_arg(argp, int);
va_end(argp);
+ if (!pp)
+ goto fail;
+
if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND))
set_tmp_handle(ptp, pthp);
else {
@@ -1545,7 +1539,7 @@ erts_port_task_schedule(Eterm id,
if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
new |= ERTS_PTS_FLG_IN_RUNQ;
- act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
if (exp == act) {
if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
@@ -1567,70 +1561,63 @@ erts_port_task_schedule(Eterm id,
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
- ERTS_SMP_LC_ASSERT(runq != xrunq);
- ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+ ERTS_LC_ASSERT(runq != xrunq);
+ ERTS_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
/* Emigrate port ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
+ erts_set_runq_port(pp, xrunq);
+ erts_runq_unlock(runq);
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
}
-#endif
enqueue_port(runq, pp);
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
- erts_smp_notify_inc_runq(runq);
+ erts_notify_inc_runq(runq);
done:
if (prof_runnable_ports)
erts_port_task_sched_unlock(&pp->sched);
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
return 0;
abort_nosuspend:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
+
+ ASSERT(ptp);
+ port_task_free(ptp);
return 0;
fail:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
+
+ if (ptp) {
+ if (ptp->type == ERTS_PORT_TASK_PROC_SIG)
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
+ port_task_free(ptp);
+ }
if (ns_pthlp)
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
-
return -1;
}
@@ -1640,14 +1627,12 @@ erts_port_task_free_port(Port *pp)
erts_aint32_t flags;
ErtsRunQueue *runq;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
erts_port_task_sched_lock(&pp->sched);
- flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ flags = erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_EXIT);
erts_port_task_sched_unlock(&pp->sched);
erts_atomic32_read_bset_relb(&pp->state,
@@ -1657,7 +1642,7 @@ erts_port_task_free_port(Port *pp)
| ERTS_PORT_SFLG_FREE),
ERTS_PORT_SFLG_FREE);
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
begin_port_cleanup(pp, NULL, NULL);
@@ -1670,34 +1655,34 @@ erts_port_task_free_port(Port *pp)
* scheduling of processes. Run-queue lock should be held by caller.
*/
-int
+void
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
Port *pp;
ErtsPortTask *execq;
int processing_busy_q;
- int res = 0;
int vreds = 0;
int reds = 0;
- erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
erts_aint32_t state;
int active;
Uint64 start_time = 0;
ErtsSchedulerData *esdp = runq->scheduler;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_aint_t io_tasks_executed = 0;
+#endif
ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
pp = pop_port(runq);
if (!pp) {
- res = 0;
goto done;
}
- ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+ ERTS_LC_VERIFY_RQ(runq, pp);
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1705,19 +1690,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
int migrated = old && old != esdp->no;
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
if (migrated) {
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
}
prepare_exec(pp, &execq, &processing_busy_q);
- erts_smp_port_lock(pp);
+ erts_port_lock(pp);
/* trace port scheduling, in */
if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
@@ -1739,7 +1724,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
if (!ptp)
break;
- task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ task_state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_EXECUTING,
ERTS_PT_STATE_SCHEDULED);
if (task_state != ERTS_PT_STATE_SCHEDULED) {
@@ -1751,8 +1736,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
start_time = erts_timestamp_millis();
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_CHK_NO_PROC_LOCKS;
ASSERT(pp->drv_ptr);
switch (ptp->type) {
@@ -1781,8 +1766,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_OUTPUT:
reds = ERTS_PORT_REDS_OUTPUT;
@@ -1791,19 +1779,11 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
LTTNG_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
ptp->u.alive.td.io.event);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
- break;
- case ERTS_PORT_TASK_EVENT:
- reds = ERTS_PORT_REDS_EVENT;
- ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
- DTRACE_DRIVER(driver_event, pp);
- LTTNG_DRIVER(driver_event, pp);
- (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
- ptp->u.alive.td.io.event,
- ptp->u.alive.td.io.event_data);
- reset_executed_io_task_handle(ptp);
- io_tasks_executed++;
+ reset_executed_io_task_handle(pp, ptp);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (ptp->u.alive.td.io.is_scheduler_event)
+ io_tasks_executed++;
+#endif
break;
case ERTS_PORT_TASK_PROC_SIG: {
ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
@@ -1869,18 +1849,17 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
-
+#if ERTS_POLL_USE_SCHEDULER_POLLING
if (io_tasks_executed) {
- ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
>= io_tasks_executed);
- erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
+ erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
-1*io_tasks_executed);
}
-
-#ifdef ERTS_SMP
- ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif
+ ASSERT(runq == erts_get_runq_port(pp));
+
active = finalize_exec(pp, &execq, processing_busy_q);
reds = pp->reds - vreds;
@@ -1889,54 +1868,42 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
*curr_port_pp = NULL;
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
if (active) {
-#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
-#endif
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
- ERTS_SMP_LC_ASSERT(runq != xrunq);
- ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+ ERTS_LC_ASSERT(runq != xrunq);
+ ERTS_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
-#endif
enqueue_port(runq, pp);
/* No need to notify ourselves about inc in runq. */
-#ifdef ERTS_SMP
}
else {
/* Emigrate port... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
+ erts_set_runq_port(pp, xrunq);
+ erts_runq_unlock(runq);
xrunq = erts_port_runq(pp);
- ASSERT(xrunq);
enqueue_port(xrunq, pp);
- erts_smp_runq_unlock(xrunq);
- erts_smp_notify_inc_runq(xrunq);
+ erts_runq_unlock(xrunq);
+ erts_notify_inc_runq(xrunq);
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
}
-#endif
}
done:
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
runq->scheduler->reductions += reds;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds);
-
- return res;
}
-#ifdef ERTS_SMP
static void
release_port(void *vport)
{
@@ -1952,7 +1919,6 @@ schedule_release_port(void *vport) {
&pp->common.u.release);
}
-#endif
static void
begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
@@ -1963,7 +1929,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
ErtsPortTaskHandleList *free_nshp = NULL;
ErtsProcList *plp;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
/*
* Abort remaining tasks...
@@ -2036,11 +2002,11 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
qs[i] = ptp->u.alive.next;
/* Normal case here is aborted tasks... */
- state = erts_smp_atomic32_read_nob(&ptp->state);
+ state = erts_atomic32_read_nob(&ptp->state);
if (state == ERTS_PT_STATE_ABORTED)
goto aborted_port_task;
- state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ state = erts_atomic32_cmpxchg_nob(&ptp->state,
ERTS_PT_STATE_EXECUTING,
ERTS_PT_STATE_SCHEDULED);
if (state != ERTS_PT_STATE_SCHEDULED) {
@@ -2067,13 +2033,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
DO_WRITE,
1);
break;
- case ERTS_PORT_TASK_EVENT:
- erts_stale_drv_select(pp->common.id,
- ERTS_Port2ErlDrvPort(pp),
- ptp->u.alive.td.io.event,
- 0,
- 1);
- break;
case ERTS_PORT_TASK_DIST_CMD:
break;
case ERTS_PORT_TASK_PROC_SIG: {
@@ -2104,7 +2063,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
}
}
- erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ erts_atomic32_read_band_nob(&pp->sched.flags,
~(ERTS_PTS_FLG_HAVE_BUSY_TASKS
|ERTS_PTS_FLG_HAVE_TASKS
|ERTS_PTS_FLGS_BUSY));
@@ -2146,7 +2105,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
/*
* Schedule cleanup of port structure...
*/
-#ifdef ERTS_SMP
/* We might not be a scheduler, e.g. when tracing to a port from the sys_msg_dispatcher */
if (!erts_get_scheduler_data()) {
erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp);
@@ -2156,26 +2114,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
(void *) pp,
&pp->common.u.release);
}
-#else
- pp->cleanup = 1;
-#endif
-}
-
-int
-erts_port_is_scheduled(Port *pp)
-{
- erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags);
- return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0;
}
-#ifdef ERTS_SMP
void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
+ ASSERT(rq == erts_get_runq_port(pp));
+ ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
enqueue_port(rq, pp);
}
@@ -2183,16 +2130,14 @@ Port *
erts_dequeue_port(ErtsRunQueue *rq)
{
Port *pp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
pp = pop_port(rq);
- ASSERT(!pp
- || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags)
+ ASSERT(!pp || rq == erts_get_runq_port(pp));
+ ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags)
& ERTS_PTS_FLG_IN_RUNQ));
return pp;
}
-#endif
/*
* Initialize the module.
@@ -2200,8 +2145,11 @@ erts_dequeue_port(ErtsRunQueue *rq)
void
erts_port_task_init(void)
{
- erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
- (erts_aint_t) 0);
- init_port_task_alloc();
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
+ (erts_aint_t) 0);
+#endif
+ init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads
+ + 1); /* aux_thread */
init_busy_caller_table_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 2a6bd165a3..ca5183b305 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,17 +27,19 @@
#ifndef ERTS_PORT_TASK_H_BASIC_TYPES__
#define ERTS_PORT_TASK_H_BASIC_TYPES__
#include "erl_sys_driver.h"
-#include "erl_smp.h"
+#include "erl_threads.h"
#define ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_port.h"
#undef ERL_PORT_GET_PORT_TYPE_ONLY__
-typedef erts_smp_atomic_t ErtsPortTaskHandle;
+typedef erts_atomic_t ErtsPortTaskHandle;
#endif
#ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
#ifndef ERL_PORT_TASK_H__
#define ERL_PORT_TASK_H__
+#include "erl_poll.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -54,19 +56,13 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle;
#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4)
typedef enum {
- ERTS_PORT_TASK_INPUT,
- ERTS_PORT_TASK_OUTPUT,
- ERTS_PORT_TASK_EVENT,
+ ERTS_PORT_TASK_INPUT = 0,
+ ERTS_PORT_TASK_OUTPUT = 1,
ERTS_PORT_TASK_TIMEOUT,
ERTS_PORT_TASK_DIST_CMD,
ERTS_PORT_TASK_PROC_SIG
} ErtsPortTaskType;
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-/* NOTE: Do not access any of the exported variables directly */
-extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-#endif
-
#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0)
#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1)
#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2)
@@ -98,8 +94,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
typedef struct {
ErlDrvSizeT high;
- erts_smp_atomic_t low;
- erts_smp_atomic_t size;
+ erts_atomic_t low;
+ erts_atomic_t size;
} ErtsPortTaskBusyPortQ;
typedef struct ErtsPortTask_ ErtsPortTask;
@@ -124,10 +120,8 @@ typedef struct {
} in;
ErtsPortTaskBusyPortQ *bpq;
} taskq;
- erts_smp_atomic32_t flags;
-#ifdef ERTS_SMP
+ erts_atomic32_t flags;
erts_mtx_t mtx;
-#endif
} ErtsPortTaskSched;
ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
@@ -142,8 +136,10 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp);
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
+/* NOTE: Do not access any of the exported variables directly */
+extern erts_atomic_t erts_port_task_outstanding_io_tasks;
#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -151,13 +147,13 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
ERTS_GLB_INLINE void
erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL);
+ erts_atomic_init_nob(pthp, (erts_aint_t) NULL);
}
ERTS_GLB_INLINE int
erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
{
- return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL;
+ return ((void *) erts_atomic_read_acqb(pthp)) != NULL;
}
ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
@@ -165,9 +161,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
{
if (bpq) {
erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW;
- erts_smp_atomic_init_nob(&bpq->low, low);
+ erts_atomic_init_nob(&bpq->low, low);
bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH;
- erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
+ erts_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
}
ptsp->taskq.bpq = bpq;
}
@@ -175,9 +171,7 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
ERTS_GLB_INLINE void
erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
-#ifdef ERTS_SMP
char *lock_str = "port_sched_lock";
-#endif
ptsp->next = NULL;
ptsp->taskq.local.busy.first = NULL;
ptsp->taskq.local.busy.last = NULL;
@@ -186,38 +180,26 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.local.first = NULL;
ptsp->taskq.in.first = NULL;
ptsp->taskq.in.last = NULL;
- erts_smp_atomic32_init_nob(&ptsp->flags, 0);
-#ifdef ERTS_SMP
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 1
-#endif
- );
-#endif
+ erts_atomic32_init_nob(&ptsp->flags, 0);
+ erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
ERTS_GLB_INLINE void
erts_port_task_sched_lock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_lock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_unlock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE int
erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
return erts_lc_mtx_is_locked(&ptsp->mtx);
#else
return 0;
@@ -228,35 +210,34 @@ erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
ERTS_GLB_INLINE void
erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_destroy(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp)
{
- erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
+ erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING);
}
-#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-
+#if defined(ERTS_INCLUDE_SCHEDULER_INTERNALS) && ERTS_POLL_USE_SCHEDULER_POLLING
ERTS_GLB_INLINE int
erts_port_task_have_outstanding_io_tasks(void)
{
- return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
+ return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
!= 0);
}
-
-#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+#endif
#endif
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-int erts_port_task_execute(ErtsRunQueue *, Port **);
+void erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
+/* generated for 'port_task' quick allocator */
+void erts_port_task_pre_alloc_init_thread(void);
+
void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *);
int erts_port_task_abort(ErtsPortTaskHandle *);
@@ -267,15 +248,12 @@ int erts_port_task_schedule(Eterm,
ErtsPortTaskType,
...);
void erts_port_task_free_port(Port *);
-int erts_port_is_scheduled(Port *);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr);
void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp);
-#ifdef ERTS_SMP
void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
Port *erts_dequeue_port(ErtsRunQueue *rq);
-#endif
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif /* ERL_PORT_TASK_H__ */
#endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */
diff --git a/erts/emulator/beam/erl_posix_str.c b/erts/emulator/beam/erl_posix_str.c
index deb7e3e173..7b3e640d3f 100644
--- a/erts/emulator/beam/erl_posix_str.c
+++ b/erts/emulator/beam/erl_posix_str.c
@@ -156,6 +156,9 @@ erl_errno_id(error)
#ifdef EFAULT
case EFAULT: return "efault";
#endif
+#ifdef EFTYPE
+ case EFTYPE: return "eftype";
+#endif
#ifdef EFBIG
case EFBIG: return "efbig";
#endif
@@ -351,6 +354,9 @@ erl_errno_id(error)
#if defined(EOPNOTSUPP) && (!defined(ENOTSUP) || (EOPNOTSUPP != ENOTSUP))
case EOPNOTSUPP: return "eopnotsupp";
#endif
+#ifdef EOVERFLOW
+ case EOVERFLOW: return "eoverflow";
+#endif
#ifdef EPERM
case EPERM: return "eperm";
#endif
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 1a579704a8..990a01b96f 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -382,12 +382,15 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
break;
}
case REF_DEF:
+ if (!ERTS_IS_CRASH_DUMPING)
+ erts_magic_ref_save_bin(obj);
+ /* fall through... */
case EXTERNAL_REF_DEF:
PRINT_STRING(res, fn, arg, "#Ref<");
PRINT_UWORD(res, fn, arg, 'u', 0, 1,
(ErlPfUWord) ref_channel_no(wobj));
ref_num = ref_numbers(wobj);
- for (i = ref_no_of_numbers(wobj)-1; i >= 0; i--) {
+ for (i = ref_no_numbers(wobj)-1; i >= 0; i--) {
PRINT_CHAR(res, fn, arg, '.');
PRINT_UWORD(res, fn, arg, 'u', 0, 1, (ErlPfUWord) ref_num[i]);
}
@@ -526,17 +529,16 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
case EXPORT_DEF:
{
Export* ep = *((Export **) (export_val(wobj) + 1));
- Atom* module = atom_tab(atom_val(ep->code[0]));
- Atom* name = atom_tab(atom_val(ep->code[1]));
+ Atom* module = atom_tab(atom_val(ep->info.mfa.module));
+ Atom* name = atom_tab(atom_val(ep->info.mfa.function));
- PRINT_STRING(res, fn, arg, "#Fun<");
+ PRINT_STRING(res, fn, arg, "fun ");
PRINT_BUF(res, fn, arg, module->name, module->len);
- PRINT_CHAR(res, fn, arg, '.');
+ PRINT_CHAR(res, fn, arg, ':');
PRINT_BUF(res, fn, arg, name->name, name->len);
- PRINT_CHAR(res, fn, arg, '.');
+ PRINT_CHAR(res, fn, arg, '/');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
- (ErlPfSWord) ep->code[2]);
- PRINT_CHAR(res, fn, arg, '>');
+ (ErlPfSWord) ep->info.mfa.arity);
}
break;
case FUN_DEF:
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
new file mode 100644
index 0000000000..f343e984f7
--- /dev/null
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -0,0 +1,4808 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process signal queue implementation.
+ *
+ * Author: Rickard Green
+ */
+
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "dist.h"
+#include "erl_process.h"
+#include "erl_port_task.h"
+#include "erl_trace.h"
+#include "beam_bp.h"
+#include "big.h"
+#include "erl_gc.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#include "erl_proc_sig_queue.h"
+#include "dtrace-wrapper.h"
+
+#define ERTS_SIG_REDS_CNT_FACTOR 4
+#define ERTS_PROC_SIG_TRACE_COUNT_LIMIT 200
+
+/*
+ * Note that not all signals are handled using this functionality!
+ */
+
+#define ERTS_SIG_Q_OP_MAX 13
+
+#define ERTS_SIG_Q_OP_EXIT 0 /* Exit signal due to bif call */
+#define ERTS_SIG_Q_OP_EXIT_LINKED 1 /* Exit signal due to link break */
+#define ERTS_SIG_Q_OP_MONITOR_DOWN 2
+#define ERTS_SIG_Q_OP_MONITOR 3
+#define ERTS_SIG_Q_OP_DEMONITOR 4
+#define ERTS_SIG_Q_OP_LINK 5
+#define ERTS_SIG_Q_OP_UNLINK 6
+#define ERTS_SIG_Q_OP_GROUP_LEADER 7
+#define ERTS_SIG_Q_OP_TRACE_CHANGE_STATE 8
+#define ERTS_SIG_Q_OP_PERSISTENT_MON_MSG 9
+#define ERTS_SIG_Q_OP_IS_ALIVE 10
+#define ERTS_SIG_Q_OP_PROCESS_INFO 11
+#define ERTS_SIG_Q_OP_SYNC_SUSPEND 12
+#define ERTS_SIG_Q_OP_RPC ERTS_SIG_Q_OP_MAX
+
+#define ERTS_SIG_Q_TYPE_MAX (ERTS_MON_LNK_TYPE_MAX + 5)
+
+#define ERTS_SIG_Q_TYPE_UNDEFINED \
+ (ERTS_MON_LNK_TYPE_MAX + 1)
+#define ERTS_SIG_Q_TYPE_DIST_LINK \
+ (ERTS_MON_LNK_TYPE_MAX + 2)
+#define ERTS_SIG_Q_TYPE_GEN_EXIT \
+ (ERTS_MON_LNK_TYPE_MAX + 3)
+#define ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR \
+ (ERTS_MON_LNK_TYPE_MAX + 4)
+#define ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO \
+ ERTS_SIG_Q_TYPE_MAX
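+
+/*
+ * Non-message signals are identified by their tag, created with
+ * ERTS_PROC_SIG_MAKE_TAG(op, type, 0) and decoded again with
+ * ERTS_PROC_SIG_OP() and ERTS_PROC_SIG_TYPE(). The ops and types
+ * above therefore must fit in the corresponding tag masks, which
+ * is asserted by erts_proc_sig_queue_init() below.
+ */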
+
+Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler);
+Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_high);
+Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_max);
+
+void
+erts_proc_sig_queue_init(void)
+{
+ ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MASK > ERTS_SIG_Q_OP_MAX);
+ ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK > ERTS_SIG_Q_OP_MAX);
+ ERTS_CT_ASSERT(ERTS_SIG_Q_TYPE_MASK >= ERTS_SIG_Q_TYPE_MAX);
+}
+
+typedef struct {
+ int active;
+ int procs;
+ struct {
+ int active;
+#if defined(USE_VM_PROBES)
+ int vm_probes;
+ char receiver_name[DTRACE_TERM_BUF_SIZE];
+#endif
+ int receive_trace;
+ int bp_ix;
+ ErtsMessage **next;
+ ErtsTracingEvent *event;
+ } messages;
+} ErtsSigRecvTracing;
+
+typedef struct {
+ Eterm message;
+ Eterm from;
+ Eterm reason;
+ union {
+ Eterm ref;
+ int normal_kills;
+ } u;
+} ErtsExitSignalData;
+
+typedef struct {
+ Eterm message;
+ Eterm key;
+} ErtsPersistMonMsg;
+
+typedef struct {
+ ErtsSignalCommon common;
+ Eterm local; /* internal pid (immediate) */
+ Eterm remote; /* external pid (heap for it follows) */
+ Eterm heap[EXTERNAL_THING_HEAD_SIZE + 1];
+} ErtsSigDistLinkOp;
+
+typedef struct {
+ ErtsSignalCommon common;
+ Uint flags_on;
+ Uint flags_off;
+ Eterm tracer;
+} ErtsSigTraceInfo;
+
+#define ERTS_SIG_GL_FLG_ACTIVE (((erts_aint_t) 1) << 0)
+#define ERTS_SIG_GL_FLG_RECEIVER (((erts_aint_t) 1) << 1)
+#define ERTS_SIG_GL_FLG_SENDER (((erts_aint_t) 1) << 2)
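+
+/*
+ * A group-leader signal may be referred to by both the sender and
+ * the receiver; the flags above track which references remain and
+ * whether the signal still is active. The last party to drop its
+ * flag frees the structure; see erts_proc_sig_send_group_leader()
+ * below for the sender side of this.
+ */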
+
+typedef struct {
+ ErtsSignalCommon common;
+ erts_atomic_t flags;
+ Eterm group_leader;
+ Eterm reply_to;
+ Eterm ref;
+ ErlOffHeap oh;
+ Eterm heap[1];
+} ErtsSigGroupLeader;
+
+typedef struct {
+ Eterm message;
+ Eterm requester;
+} ErtsIsAliveRequest;
+
+typedef struct {
+ Eterm message;
+ Eterm requester;
+ int async;
+} ErtsSyncSuspendRequest;
+
+typedef struct {
+ ErtsMonitorSuspend *mon;
+ ErtsMessage *sync;
+} ErtsProcSigPendingSuspend;
+
+typedef struct {
+ ErtsSignalCommon common;
+ Sint refc;
+ Sint delayed_len;
+ Sint len_offset;
+} ErtsProcSigMsgQLenOffsetMarker;
+
+typedef struct {
+ ErtsSignalCommon common;
+ ErtsProcSigMsgQLenOffsetMarker marker;
+ Sint msgq_len_offset;
+ Eterm requester;
+ Eterm ref;
+ ErtsORefThing oref_thing;
+ Uint reserve_size;
+ Uint len;
+ int flags;
+ int item_ix[1]; /* actually of size 'len'... */
+} ErtsProcessInfoSig;
+
+#define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1)
+#define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2)
+
+typedef struct {
+ ErtsSignalCommon common;
+ Eterm requester;
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **);
+ void *arg;
+ Eterm ref;
+ ErtsORefThing oref_thing;
+} ErtsProcSigRPC;
+
+static int handle_msg_tracing(Process *c_p,
+ ErtsSigRecvTracing *tracing,
+ ErtsMessage ***next_nm_sig);
+static int handle_trace_change_state(Process *c_p,
+ ErtsSigRecvTracing *tracing,
+ Uint16 type,
+ ErtsMessage *sig,
+ ErtsMessage ***next_nm_sig);
+static void getting_unlinked(Process *c_p, Eterm unlinker);
+static void getting_linked(Process *c_p, Eterm linker);
+static void group_leader_reply(Process *c_p, Eterm to,
+ Eterm ref, int success);
+static int stretch_limit(Process *c_p, ErtsSigRecvTracing *tp,
+ int abs_lim, int *limp);
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+#define ERTS_PROC_SIG_HDBG_PRIV_CHKQ(P, T, NMN) \
+ do { \
+ ErtsMessage **nm_next__ = *(NMN); \
+ ErtsMessage **nm_last__ = (P)->sig_qs.nmsigs.last; \
+ if (!nm_next__ || !*nm_next__) { \
+ nm_next__ = NULL; \
+ nm_last__ = NULL; \
+ } \
+ proc_sig_hdbg_check_queue((P), \
+ 1, \
+ &(P)->sig_qs.cont, \
+ (P)->sig_qs.cont_last, \
+ nm_next__, \
+ nm_last__, \
+ (T), \
+ NULL, \
+ ERTS_PSFLG_FREE); \
+ } while (0);
+static Sint
+proc_sig_hdbg_check_queue(Process *c_p,
+ int privq,
+ ErtsMessage **sig_next,
+ ErtsMessage **sig_last,
+ ErtsMessage **sig_nm_next,
+ ErtsMessage **sig_nm_last,
+ ErtsSigRecvTracing *tracing,
+ int *found_saved_last_p,
+ erts_aint32_t sig_psflg);
+#else
+#define ERTS_PROC_SIG_HDBG_PRIV_CHKQ(P, T, NMN)
+#endif
+
+typedef struct {
+ ErtsSignalCommon common;
+ Eterm ref;
+ Eterm heap[1];
+} ErtsSigDistProcDemonitor;
+
+static void
+destroy_dist_proc_demonitor(ErtsSigDistProcDemonitor *dmon)
+{
+ Eterm ref = dmon->ref;
+ if (is_external(ref)) {
+ ExternalThing *etp = external_thing_ptr(ref);
+ erts_deref_node_entry(etp->node);
+ }
+ erts_free(ERTS_ALC_T_DIST_DEMONITOR, dmon);
+}
+
+static ERTS_INLINE ErtsSigDistLinkOp *
+make_sig_dist_link_op(int op, Eterm local, Eterm remote)
+{
+ Eterm *hp;
+ ErlOffHeap oh = {0};
+ ErtsSigDistLinkOp *sdlnk = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsSigDistLinkOp));
+ ASSERT(is_internal_pid(local));
+ ASSERT(is_external_pid(remote));
+
+ hp = &sdlnk->heap[0];
+
+ sdlnk->common.tag = ERTS_PROC_SIG_MAKE_TAG(op,
+ ERTS_SIG_Q_TYPE_DIST_LINK,
+ 0);
+ sdlnk->local = local;
+ sdlnk->remote = STORE_NC(&hp, &oh, remote);
+
+ ASSERT(&sdlnk->heap[0] < hp);
+ ASSERT(hp <= &sdlnk->heap[0] + sizeof(sdlnk->heap)/sizeof(sdlnk->heap[0]));
+ ASSERT(boxed_val(sdlnk->remote) == &sdlnk->heap[0]);
+
+ return sdlnk;
+}
+
+static ERTS_INLINE void
+destroy_sig_dist_link_op(ErtsSigDistLinkOp *sdlnk)
+{
+ ASSERT(is_external_pid(sdlnk->remote));
+ ASSERT(boxed_val(sdlnk->remote) == &sdlnk->heap[0]);
+ erts_deref_node_entry(((ExternalThing *) &sdlnk->heap[0])->node);
+ erts_free(ERTS_ALC_T_SIG_DATA, sdlnk);
+}
+
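+/*
+ * The ErtsExitSignalData part of an exit/down signal is stored in
+ * the unused part of the combined heap fragment, directly after
+ * the 'used_size' words of heap data (see send_gen_exit_signal()
+ * below).
+ */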
+static ERTS_INLINE ErtsExitSignalData *
+get_exit_signal_data(ErtsMessage *xsig)
+{
+ ASSERT(ERTS_SIG_IS_NON_MSG(xsig));
+ ASSERT((ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
+ == ERTS_SIG_Q_OP_EXIT)
+ || (ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
+ == ERTS_SIG_Q_OP_EXIT_LINKED)
+ || (ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
+ == ERTS_SIG_Q_OP_MONITOR_DOWN));
+ ASSERT(xsig->hfrag.alloc_size > xsig->hfrag.used_size);
+ ASSERT((xsig->hfrag.alloc_size - xsig->hfrag.used_size)*sizeof(UWord)
+ >= sizeof(ErtsExitSignalData));
+ return (ErtsExitSignalData *) (char *) (&xsig->hfrag.mem[0]
+ + xsig->hfrag.used_size);
+}
+
+static ERTS_INLINE void
+destroy_trace_info(ErtsSigTraceInfo *ti)
+{
+ if (is_value(ti->tracer))
+ erts_tracer_update(&ti->tracer, NIL);
+ erts_free(ERTS_ALC_T_SIG_DATA, ti);
+}
+
+static void
+destroy_sig_group_leader(ErtsSigGroupLeader *sgl)
+{
+ erts_cleanup_offheap(&sgl->oh);
+ erts_free(ERTS_ALC_T_SIG_DATA, sgl);
+}
+
+static ERTS_INLINE void
+sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op,
+ Process *rp, ErtsMessage ***last_next)
+{
+ switch (op) {
+ case ERTS_SIG_Q_OP_LINK:
+ if (c_p
+ && ((!!IS_TRACED(c_p))
+ & (ERTS_TRACE_FLAGS(c_p) & (F_TRACE_SOL
+ | F_TRACE_SOL1)))) {
+ ErtsSigTraceInfo *ti;
+ Eterm tag;
+ /*
+ * Set on link enabled.
+ *
+ * Prepend a trace-change-state signal before the
+ * link signal...
+ */
+ tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
+ ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
+ 0);
+ ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
+ ti->common.next = *sigp;
+ ti->common.specific.next = &ti->common.next;
+ ti->common.tag = tag;
+ ti->flags_on = ERTS_TRACE_FLAGS(c_p) & TRACEE_FLAGS;
+ if (!(ti->flags_on & F_TRACE_SOL1))
+ ti->flags_off = 0;
+ else {
+ ti->flags_off = F_TRACE_SOL1|F_TRACE_SOL;
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACE_FLAGS(c_p) &= ~(F_TRACE_SOL1|F_TRACE_SOL);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ erts_tracer_update(&ti->tracer, ERTS_TRACER(c_p));
+ *sigp = (ErtsMessage *) ti;
+ if (!*last_next || *last_next == sigp)
+ *last_next = &ti->common.next;
+ }
+ break;
+
+#ifdef USE_VM_PROBES
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+
+ if (DTRACE_ENABLED(process_exit_signal)) {
+ ErtsMessage* sig = *sigp;
+ Uint16 type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag);
+ Eterm reason, from;
+
+ if (type == ERTS_SIG_Q_TYPE_GEN_EXIT) {
+ ErtsExitSignalData *xsigd = get_exit_signal_data(sig);
+ reason = xsigd->reason;
+ from = xsigd->from;
+ }
+ else {
+ ErtsLink *lnk = (ErtsLink *) sig, *olnk;
+
+ ASSERT(type == ERTS_LNK_TYPE_PROC
+ || type == ERTS_LNK_TYPE_PORT
+ || type == ERTS_LNK_TYPE_DIST_PROC);
+
+ olnk = erts_link_to_other(lnk, NULL);
+ reason = lnk->other.item;
+ from = olnk->other.item;
+ }
+
+ if (is_pid(from)) {
+
+ DTRACE_CHARBUF(sender_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(receiver_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);
+
+ if (reason == am_kill) {
+ reason = am_killed;
+ }
+
+ dtrace_pid_str(from, sender_str);
+ dtrace_proc_str(rp, receiver_str);
+ erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
+ DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
+ }
+ }
+ break;
+
+#endif
+
+ default:
+ break;
+ }
+}
+
+static void
+sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig)
+{
+ ErtsMessage *tmp;
+
+ /* The usual case; no tracing signals... */
+ if (sig == (ErtsSignal *) first) {
+ ASSERT(sig->common.next == NULL);
+ return;
+ }
+
+ /* Got trace signals to clean up... */
+
+ tmp = first;
+
+ while (tmp) {
+ ErtsMessage *tmp_free = tmp;
+ tmp = tmp->next;
+ if (sig != (ErtsSignal *) tmp_free) {
+ switch (ERTS_PROC_SIG_OP(((ErtsSignal *) tmp_free)->common.tag)) {
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ destroy_trace_info((ErtsSigTraceInfo *) tmp_free);
+ break;
+ case ERTS_SIG_Q_OP_MONITOR:
+ break; /* ignore flushed pending signal */
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected signal op");
+ break;
+ }
+ }
+ }
+}
+
+#ifdef DEBUG
+static int dbg_count_nmsigs(ErtsMessage *first)
+{
+ ErtsMessage *sig;
+ int cnt = 0;
+
+ for (sig = first; sig; sig = sig->next) {
+ if (ERTS_SIG_IS_NON_MSG(sig))
+ ++cnt;
+ }
+ return cnt;
+}
+#endif
+
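+/*
+ * Append signals last in the in-queue of 'rp'. The caller is
+ * expected to hold the msgq lock of 'rp'. Non-message signals are
+ * also linked together via common.specific.next, so that they can
+ * be found without scanning past ordinary messages; 'last_next'
+ * points into this chain when more than one non-message signal is
+ * enqueued at once.
+ */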
+static ERTS_INLINE erts_aint32_t
+enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
+ erts_aint32_t in_state)
+{
+ erts_aint32_t state = in_state;
+ ErtsMessage **this = rp->sig_inq.last;
+
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);
+
+ ASSERT(!*this);
+ *this = first;
+ rp->sig_inq.last = last;
+
+ if (!rp->sig_inq.nmsigs.next) {
+ ASSERT(!rp->sig_inq.nmsigs.last);
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ rp->sig_inq.nmsigs.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ rp->sig_inq.nmsigs.next = &first->next;
+ }
+ else
+ goto no_nmsig;
+
+ state = erts_atomic32_read_bor_nob(&rp->state,
+ ERTS_PSFLG_SIG_IN_Q);
+ no_nmsig:
+ ASSERT(!(state & ERTS_PSFLG_SIG_IN_Q));
+ }
+ else {
+ ErtsSignal *sig;
+ ASSERT(rp->sig_inq.nmsigs.last);
+
+ sig = (ErtsSignal *) *rp->sig_inq.nmsigs.last;
+
+ ASSERT(sig && !sig->common.specific.next);
+ ASSERT(state & ERTS_PSFLG_SIG_IN_Q);
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ sig->common.specific.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ sig->common.specific.next = &first->next;
+ }
+ }
+
+ if (last_next) {
+ ASSERT(dbg_count_nmsigs(first) >= 2);
+ rp->sig_inq.nmsigs.last = last_next;
+ }
+ else if (ERTS_SIG_IS_NON_MSG(first)) {
+ ASSERT(dbg_count_nmsigs(first) == 1);
+ rp->sig_inq.nmsigs.last = this;
+ }
+ else
+ ASSERT(dbg_count_nmsigs(first) == 0);
+
+ rp->sig_inq.len += num_msgs;
+
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);
+
+ return state;
+}
+
+erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
+ erts_aint32_t in_state)
+{
+ return enqueue_signals(rp, first, last, last_next, num_msgs, in_state);
+}
+
+void
+erts_make_dirty_proc_handled(Eterm pid,
+ erts_aint32_t state,
+ erts_aint32_t prio)
+{
+ Eterm *hp;
+ ErtsMessage *mp;
+ Process *sig_handler;
+
+ ASSERT(state & (ERTS_PSFLG_DIRTY_RUNNING |
+ ERTS_PSFLG_DIRTY_RUNNING_SYS));
+
+ if (prio < 0)
+ prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
+
+ switch (prio) {
+ case PRIORITY_MAX:
+ sig_handler = erts_dirty_process_signal_handler_max;
+ break;
+ case PRIORITY_HIGH:
+ sig_handler = erts_dirty_process_signal_handler_high;
+ break;
+ default:
+ sig_handler = erts_dirty_process_signal_handler;
+ break;
+ }
+
+ /* Make sure signals are handled... */
+ mp = erts_alloc_message(0, &hp);
+ erts_queue_message(sig_handler, 0, mp, pid, am_system);
+}
+
+static void
+check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig);
+
+
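+/*
+ * Queue a signal on the process identified by 'pid'. Returns zero
+ * if the receiver no longer exists, in which case the caller is
+ * responsible for cleaning up the signal. On a normal scheduler a
+ * process monitor signal may be kept as "pending" in the scheduler
+ * data and is then delivered together with the next signal sent to
+ * the same process (see erts_proc_sig_send_pending()).
+ */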
+static int
+proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
+{
+ int res;
+ Process *rp;
+ ErtsMessage *first, *last, **last_next, **sigp;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
+ erts_aint32_t state;
+ ErtsSignal *pend_sig;
+
+ if (is_normal_sched) {
+ pend_sig = esdp->pending_signal.sig;
+ if (op == ERTS_SIG_Q_OP_MONITOR
+ && ((ErtsMonitor*)sig)->type == ERTS_MON_TYPE_PROC) {
+
+ if (!pend_sig) {
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = esdp->current_process;
+#endif
+ return 1;
+ }
+ ASSERT(esdp->pending_signal.dbg_from == esdp->current_process);
+ if (pend_sig != sig) {
+ /* Switch them and send previously pending signal instead */
+ Eterm pend_to = esdp->pending_signal.to;
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+ sig = pend_sig;
+ pid = pend_to;
+ }
+ else {
+ /* Caller wants to flush pending signal */
+ ASSERT(pid == esdp->pending_signal.to);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+ pend_sig = NULL;
+ }
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)sig, am_noproc);
+ return 1;
+ }
+ }
+ else if (pend_sig && pid == esdp->pending_signal.to) {
+ /* Flush pending signal to maintain signal order */
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ return 0;
+ }
+
+ /* Prepend pending signal */
+ pend_sig->common.next = (ErtsMessage*) sig;
+ pend_sig->common.specific.next = &pend_sig->common.next;
+ first = (ErtsMessage*) pend_sig;
+ last = (ErtsMessage*) sig;
+ sigp = last_next = &pend_sig->common.next;
+ goto first_last_done;
+ }
+ else {
+ pend_sig = NULL;
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp)
+ return 0;
+ }
+ }
+ else {
+ rp = erts_proc_lookup_raw_inc_refc(pid);
+ if (!rp)
+ return 0;
+ pend_sig = NULL;
+ }
+
+ first = last = (ErtsMessage *) sig;
+ last_next = NULL;
+ sigp = &first;
+
+first_last_done:
+ sig->common.specific.next = NULL;
+
+ /* may add signals before sig */
+ sig_enqueue_trace(c_p, sigp, op, rp, &last_next);
+
+ last->next = NULL;
+
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+
+ state = erts_atomic32_read_nob(&rp->state);
+
+ if (ERTS_PSFLG_FREE & state)
+ res = 0;
+ else {
+ state = enqueue_signals(rp, first, &last->next, last_next, 0, state);
+ if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO))
+ check_push_msgq_len_offs_marker(rp, sig);
+ res = !0;
+ }
+
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+
+ if (res == 0) {
+ sig_enqueue_trace_cleanup(first, sig);
+ if (pend_sig) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ if (sig == pend_sig) {
+ /* We did a switch, caller's signal is now pending (still ok) */
+ ASSERT(esdp->pending_signal.sig);
+ res = 1;
+ }
+ }
+ }
+ else
+ erts_proc_notify_new_sig(rp, state, 0);
+
+ if (!is_normal_sched)
+ erts_proc_dec_refc(rp);
+
+ return res;
+}
+
+void erts_proc_sig_send_pending(ErtsSchedulerData* esdp)
+{
+ ErtsSignal* sig = esdp->pending_signal.sig;
+ int op;
+
+ ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(sig);
+ ASSERT(is_internal_pid(esdp->pending_signal.to));
+
+ op = ERTS_SIG_Q_OP_MONITOR;
+ ASSERT(op == ERTS_PROC_SIG_OP(sig->common.tag));
+
+ if (!proc_queue_signal(NULL, esdp->pending_signal.to, sig, op)) {
+ ErtsMonitor* mon = (ErtsMonitor*)sig;
+ erts_proc_sig_send_monitor_down(mon, am_noproc);
+ }
+}
+
+static int
+maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
+{
+ /*
+ * returns:
+ * > 0 -> elevated prio; process alive or exiting
+ * < 0 -> no elevation needed; process alive or exiting
+ * 0 -> process terminated (free)
+ */
+ int res;
+ Process *rp;
+ erts_aint32_t state, my_prio, other_prio;
+
+ rp = erts_proc_lookup_raw(other);
+ if (!rp)
+ res = 0;
+ else {
+ res = -1;
+ state = erts_atomic32_read_nob(&c_p->state);
+ my_prio = ERTS_PSFLGS_GET_USR_PRIO(state);
+
+ state = erts_atomic32_read_nob(&rp->state);
+ other_prio = ERTS_PSFLGS_GET_USR_PRIO(state);
+
+ if (other_prio > my_prio) {
+ /* The other's prio is lower than mine; elevate it... */
+ res = !!erts_sig_prio(other, my_prio);
+ if (res) {
+ /* ensure handled if dirty executing... */
+ state = erts_atomic32_read_nob(&rp->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(other, state, my_prio);
+ }
+ }
+ }
+ }
+ return res;
+}
+
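+/*
+ * Fetch signals from the in-queue into the private queues of
+ * 'proc'. Plain messages are appended directly to the inner queue
+ * unless the middle queue (sig_qs.cont) is non-empty or receive
+ * tracing is enabled; the chain of non-message signals and the
+ * ERTS_PSFLG_SIG_Q/ERTS_PSFLG_SIG_IN_Q state flags are updated
+ * accordingly.
+ */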
+void
+erts_proc_sig_fetch__(Process *proc)
+{
+ ASSERT(proc->sig_inq.first);
+
+ if (!proc->sig_inq.nmsigs.next) {
+ ASSERT(!(ERTS_PSFLG_SIG_IN_Q
+ & erts_atomic32_read_nob(&proc->state)));
+ ASSERT(!proc->sig_inq.nmsigs.last);
+
+ if (proc->sig_qs.cont || ERTS_MSG_RECV_TRACED(proc)) {
+ *proc->sig_qs.cont_last = proc->sig_inq.first;
+ proc->sig_qs.cont_last = proc->sig_inq.last;
+ }
+ else {
+ *proc->sig_qs.last = proc->sig_inq.first;
+ proc->sig_qs.last = proc->sig_inq.last;
+ }
+ }
+ else {
+ erts_aint32_t s;
+ ASSERT(proc->sig_inq.nmsigs.last);
+ if (!proc->sig_qs.nmsigs.last) {
+ ASSERT(!proc->sig_qs.nmsigs.next);
+ if (proc->sig_inq.nmsigs.next == &proc->sig_inq.first)
+ proc->sig_qs.nmsigs.next = proc->sig_qs.cont_last;
+ else
+ proc->sig_qs.nmsigs.next = proc->sig_inq.nmsigs.next;
+
+ s = erts_atomic32_read_bset_nob(&proc->state,
+ (ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q),
+ ERTS_PSFLG_SIG_Q);
+
+ ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
+ == ERTS_PSFLG_SIG_IN_Q); (void)s;
+ }
+ else {
+ ErtsSignal *sig;
+ ASSERT(proc->sig_qs.nmsigs.next);
+ sig = ((ErtsSignal *) *proc->sig_qs.nmsigs.last);
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+ ASSERT(!sig->common.specific.next);
+ if (proc->sig_inq.nmsigs.next == &proc->sig_inq.first)
+ sig->common.specific.next = proc->sig_qs.cont_last;
+ else
+ sig->common.specific.next = proc->sig_inq.nmsigs.next;
+
+ s = erts_atomic32_read_band_nob(&proc->state,
+ ~ERTS_PSFLG_SIG_IN_Q);
+
+ ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
+ == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); (void)s;
+ }
+ if (proc->sig_inq.nmsigs.last == &proc->sig_inq.first)
+ proc->sig_qs.nmsigs.last = proc->sig_qs.cont_last;
+ else
+ proc->sig_qs.nmsigs.last = proc->sig_inq.nmsigs.last;
+ proc->sig_inq.nmsigs.next = NULL;
+ proc->sig_inq.nmsigs.last = NULL;
+
+ *proc->sig_qs.cont_last = proc->sig_inq.first;
+ proc->sig_qs.cont_last = proc->sig_inq.last;
+ }
+
+ proc->sig_qs.len += proc->sig_inq.len;
+
+ proc->sig_inq.first = NULL;
+ proc->sig_inq.last = &proc->sig_inq.first;
+ proc->sig_inq.len = 0;
+
+}
+
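+/*
+ * Fetch variant used when a process-info request has installed a
+ * message queue length offset marker first in the in-queue. The
+ * in-queue length is then accumulated in the marker instead of in
+ * sig_qs.len, and the marker is kept first in the in-queue across
+ * the fetch.
+ */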
+Sint
+erts_proc_sig_fetch_msgq_len_offs__(Process *proc)
+{
+ ErtsProcSigMsgQLenOffsetMarker *marker
+ = (ErtsProcSigMsgQLenOffsetMarker *) proc->sig_inq.first;
+
+ ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ if (marker->common.next) {
+ Sint len;
+
+ proc->flags |= F_DELAYED_PSIGQS_LEN;
+
+ /*
+ * Prevent update of sig_qs.len in fetch. These
+ * updates are done via process-info signal(s)
+ * instead...
+ */
+ len = proc->sig_inq.len;
+ marker->delayed_len += len;
+ marker->len_offset -= len;
+ proc->sig_inq.len = 0;
+
+ /*
+ * Temporarily remove marker during fetch...
+ */
+
+ proc->sig_inq.first = marker->common.next;
+ if (proc->sig_inq.last == &marker->common.next)
+ proc->sig_inq.last = &proc->sig_inq.first;
+ if (proc->sig_inq.nmsigs.next == &marker->common.next)
+ proc->sig_inq.nmsigs.next = &proc->sig_inq.first;
+ if (proc->sig_inq.nmsigs.last == &marker->common.next)
+ proc->sig_inq.nmsigs.last = &proc->sig_inq.first;
+
+ erts_proc_sig_fetch__(proc);
+
+ marker->common.next = NULL;
+ proc->sig_inq.first = (ErtsMessage *) marker;
+ proc->sig_inq.last = &marker->common.next;
+
+ }
+
+ return marker->delayed_len;
+}
+
+static ERTS_INLINE Sint
+proc_sig_privqs_len(Process *c_p, int have_qlock)
+{
+ Sint res = c_p->sig_qs.len;
+
+ ERTS_LC_ASSERT(!have_qlock
+ ? (ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(c_p))
+ : ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
+ == ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
+ & erts_proc_lc_my_proc_locks(c_p))));
+
+ if (c_p->flags & F_DELAYED_PSIGQS_LEN) {
+ ErtsProcSigMsgQLenOffsetMarker *marker;
+
+ if (!have_qlock)
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
+ ASSERT(marker);
+ ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ res += marker->delayed_len;
+
+ if (!have_qlock)
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ c_p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(res == len);
+ }
+#endif
+
+ return res;
+}
+
+Sint
+erts_proc_sig_privqs_len(Process *c_p)
+{
+ return proc_sig_privqs_len(c_p, 0);
+}
+
+static void do_seq_trace_output(Eterm to, Eterm token, Eterm msg);
+
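+/*
+ * Build and send a "generic" exit signal, i.e., a message combined
+ * with a heap fragment in which the resulting {'EXIT', From, Reason}
+ * or {'DOWN', Ref, process, From, Reason} term already has been
+ * created. The receiver can then convert the signal into an ordinary
+ * message without copying (see convert_prepared_sig_to_msg()).
+ */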
+static void
+send_gen_exit_signal(Process *c_p, Eterm from_tag,
+ Eterm from, Eterm to,
+ Sint16 op, Eterm reason, Eterm ref,
+ Eterm token, int normal_kills)
+{
+ ErtsExitSignalData *xsigd;
+ Eterm *hp, *start_hp, s_reason, s_ref, s_message, s_token, s_from;
+ ErtsMessage *mp;
+ ErlHeapFragment *hfrag;
+ ErlOffHeap *ohp;
+ Uint hsz, from_sz, reason_sz, ref_sz, token_sz;
+ int seq_trace;
+#ifdef USE_VM_PROBES
+ Eterm s_utag, utag;
+ Uint utag_sz;
+#endif
+
+ ASSERT(is_immed(from_tag));
+
+ hsz = sizeof(ErtsExitSignalData)/sizeof(Uint);
+
+ seq_trace = c_p && have_seqtrace(token);
+ if (seq_trace)
+ seq_trace_update_send(c_p);
+
+#ifdef USE_VM_PROBES
+ utag_sz = 0;
+ utag = NIL;
+ if (c_p && token != NIL && (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
+ utag_sz = size_object(DT_UTAG(c_p));
+ utag = DT_UTAG(c_p);
+ }
+ else if (token == am_have_dt_utag) {
+ token = NIL;
+ }
+ hsz += utag_sz;
+#endif
+
+ token_sz = is_immed(token) ? 0 : size_object(token);
+ hsz += token_sz;
+
+ from_sz = is_immed(from) ? 0 : size_object(from);
+ hsz += from_sz;
+
+ reason_sz = is_immed(reason) ? 0 : size_object(reason);
+ hsz += reason_sz;
+
+ switch (op) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED: {
+ /* {'EXIT', From, Reason} */
+ hsz += 4; /* 3-tuple */
+ ref_sz = 0;
+ break;
+ }
+ case ERTS_SIG_Q_OP_MONITOR_DOWN: {
+ /* {'DOWN', Ref, process, From, Reason} */
+ hsz += 6; /* 5-tuple */
+ ref_sz = NC_HEAP_SIZE(ref);
+ hsz += ref_sz;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid exit signal op");
+ break;
+ }
+
+ /*
+ * Allocate message combined with heap fragment...
+ */
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ s_token = (is_immed(token)
+ ? token
+ : copy_struct(token, token_sz, &hp, ohp));
+
+ s_reason = (is_immed(reason)
+ ? reason
+ : copy_struct(reason, reason_sz, &hp, ohp));
+
+ s_from = (is_immed(from)
+ ? from
+ : copy_struct(from, from_sz, &hp, ohp));
+
+ if (!ref_sz)
+ s_ref = NIL;
+ else
+ s_ref = STORE_NC(&hp, ohp, ref);
+
+ switch (op) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ /* {'EXIT', From, Reason} */
+ s_message = TUPLE3(hp, am_EXIT, s_from, s_reason);
+ hp += 4;
+ break;
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ /* {'DOWN', Ref, process, From, Reason} */
+ s_message = TUPLE5(hp, am_DOWN, s_ref, am_process, s_from, s_reason);
+ hp += 6;
+ break;
+ }
+
+#ifdef USE_VM_PROBES
+ s_utag = (is_immed(utag)
+ ? utag
+ : copy_struct(utag, utag_sz, &hp, ohp));
+ ERL_MESSAGE_DT_UTAG(mp) = s_utag;
+#endif
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(op,
+ ERTS_SIG_Q_TYPE_GEN_EXIT,
+ 0);
+ ERL_MESSAGE_TOKEN(mp) = s_token;
+ ERL_MESSAGE_FROM(mp) = from_tag; /* immediate... */
+
+ hfrag->used_size = hp - start_hp;
+
+ xsigd = (ErtsExitSignalData *) (char *) hp;
+
+ xsigd->message = s_message;
+ xsigd->from = s_from;
+ xsigd->reason = s_reason;
+ if (is_nil(s_ref))
+ xsigd->u.normal_kills = normal_kills;
+ else {
+ ASSERT(is_ref(s_ref));
+ xsigd->u.ref = s_ref;
+ }
+
+ if (seq_trace)
+ do_seq_trace_output(to, s_token, s_message);
+
+ if (!proc_queue_signal(c_p, to, (ErtsSignal *) mp, op)) {
+ mp->next = NULL;
+ erts_cleanup_messages(mp);
+ }
+}
+
+static void
+do_seq_trace_output(Eterm to, Eterm token, Eterm msg)
+{
+ /*
+ * We could do this when enqueuing the signal and avoid some
+ * locking. However, the enqueuing code would then always
+ * have the penalty of this seq-tracing code, which we do not
+ * want...
+ */
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
+ Process *rp;
+
+ if (is_normal_sched)
+ rp = erts_proc_lookup_raw(to);
+ else
+ rp = erts_proc_lookup_raw_inc_refc(to);
+
+ if (rp) {
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+
+ if (!ERTS_PROC_IS_EXITING(rp))
+ seq_trace_output(token, msg, SEQ_TRACE_SEND, to, rp);
+
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+
+ if (!is_normal_sched)
+ erts_proc_dec_refc(rp);
+ }
+}
+
+void
+erts_proc_sig_send_persistent_monitor_msg(Uint16 type, Eterm key,
+ Eterm from, Eterm to,
+ Eterm msg, Uint msg_sz)
+{
+ ErtsPersistMonMsg *prst_mon;
+ ErtsMessage *mp;
+ ErlHeapFragment *hfrag;
+ Eterm *hp, *start_hp, message;
+ ErlOffHeap *ohp;
+ Uint hsz = sizeof(ErtsPersistMonMsg) + msg_sz;
+
+ /*
+ * Allocate message combined with heap fragment...
+ */
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ ASSERT(msg_sz == size_object(msg));
+ message = copy_struct(msg, msg_sz, &hp, ohp);
+ hfrag->used_size = hp - start_hp;
+
+ prst_mon = (ErtsPersistMonMsg *) (char *) hp;
+ prst_mon->message = message;
+
+ switch (type) {
+ case ERTS_MON_TYPE_NODES:
+ ASSERT(is_small(key));
+ prst_mon->key = key;
+ break;
+
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ ASSERT(is_internal_ref(key));
+ ASSERT(is_tuple_arity(message, 5));
+
+ prst_mon->key = tuple_val(message)[2];
+
+ ASSERT(eq(prst_mon->key, key));
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid persistent monitor type");
+ prst_mon->key = key;
+ break;
+ }
+
+ ASSERT(is_immed(from));
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_PERSISTENT_MON_MSG,
+ type, 0);
+ ERL_MESSAGE_FROM(mp) = from;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+
+ if (!proc_queue_signal(NULL, to, (ErtsSignal *) mp,
+ ERTS_SIG_Q_OP_PERSISTENT_MON_MSG)) {
+ mp->next = NULL;
+ erts_cleanup_messages(mp);
+ }
+}
+
+static ERTS_INLINE Eterm
+get_persist_mon_msg(ErtsMessage *sig, Eterm *msg)
+{
+ ErtsPersistMonMsg *prst_mon;
+ prst_mon = ((ErtsPersistMonMsg *)
+ (char *) (&sig->hfrag.mem[0]
+ + sig->hfrag.used_size));
+ *msg = prst_mon->message;
+ return prst_mon->key;
+}
+
+void
+erts_proc_sig_send_exit(Process *c_p, Eterm from, Eterm to,
+ Eterm reason, Eterm token,
+ int normal_kills)
+{
+ Eterm from_tag;
+ ASSERT(!c_p || c_p->common.id == from);
+ if (is_immed(from)) {
+ ASSERT(is_internal_pid(from) || is_internal_port(from));
+ from_tag = from;
+ }
+ else {
+ DistEntry *dep;
+ ASSERT(is_external_pid(from));
+ dep = external_pid_dist_entry(from);
+ from_tag = dep->sysname;
+ }
+ send_gen_exit_signal(c_p, from_tag, from, to, ERTS_SIG_Q_OP_EXIT,
+ reason, NIL, token, normal_kills);
+}
+
+void
+erts_proc_sig_send_link_exit(Process *c_p, Eterm from, ErtsLink *lnk,
+ Eterm reason, Eterm token)
+{
+ Eterm to;
+ ASSERT(!c_p || c_p->common.id == from);
+ ASSERT(lnk);
+ to = lnk->other.item;
+ if (is_not_immed(reason) || is_not_nil(token)) {
+ ASSERT(is_internal_pid(from) || is_internal_port(from));
+ send_gen_exit_signal(c_p, from, from, to, ERTS_SIG_Q_OP_EXIT_LINKED,
+ reason, NIL, token, 0);
+ }
+ else {
+ /* Pass signal using old link structure... */
+ ErtsSignal *sig = (ErtsSignal *) lnk;
+ lnk->other.item = reason; /* pass reason via this other.item */
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_EXIT_LINKED,
+ lnk->type, 0);
+ if (proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_EXIT_LINKED))
+ return; /* receiver will destroy lnk structure */
+ }
+ if (lnk)
+ erts_link_release(lnk);
+}
+
+int
+erts_proc_sig_send_link(Process *c_p, Eterm to, ErtsLink *lnk)
+{
+ ErtsSignal *sig;
+ Uint16 type = lnk->type;
+
+ ASSERT(!c_p || c_p->common.id == lnk->other.item);
+ ASSERT(lnk);
+ ASSERT(is_internal_pid(to));
+
+ sig = (ErtsSignal *) lnk;
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_LINK,
+ type, 0);
+
+ return proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_LINK);
+}
+
+void
+erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk)
+{
+ ErtsSignal *sig;
+ Eterm to;
+
+ ASSERT(lnk);
+
+ sig = (ErtsSignal *) lnk;
+ to = lnk->other.item;
+
+ ASSERT(is_internal_pid(to));
+
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_UNLINK,
+ lnk->type, 0);
+
+ if (!proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_UNLINK))
+ erts_link_release(lnk);
+}
+
+void
+erts_proc_sig_send_dist_link_exit(DistEntry *dep,
+ Eterm from, Eterm to,
+ Eterm reason, Eterm token)
+{
+ send_gen_exit_signal(NULL, dep->sysname, from, to, ERTS_SIG_Q_OP_EXIT_LINKED,
+ reason, NIL, token, 0);
+}
+
+void
+erts_proc_sig_send_dist_unlink(DistEntry *dep, Eterm from, Eterm to)
+{
+ ErtsSignal *sig;
+
+ ASSERT(is_internal_pid(to));
+ ASSERT(is_external_pid(from));
+ ASSERT(dep == external_pid_dist_entry(from));
+
+ sig = (ErtsSignal *) make_sig_dist_link_op(ERTS_SIG_Q_OP_UNLINK,
+ to, from);
+
+ if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_UNLINK))
+ destroy_sig_dist_link_op((ErtsSigDistLinkOp *) sig);
+}
+
+void
+erts_proc_sig_send_dist_monitor_down(DistEntry *dep, Eterm ref,
+ Eterm from, Eterm to,
+ Eterm reason)
+{
+ Eterm monitored, heap[3];
+ if (is_atom(from))
+ monitored = TUPLE2(&heap[0], from, dep->sysname);
+ else
+ monitored = from;
+ send_gen_exit_signal(NULL, dep->sysname, monitored,
+ to, ERTS_SIG_Q_OP_MONITOR_DOWN,
+ reason, ref, NIL, 0);
+}
+
+void
+erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
+{
+ Eterm to;
+
+ ASSERT(erts_monitor_is_target(mon));
+ ASSERT(!erts_monitor_is_in_table(mon));
+
+ to = mon->other.item;
+ ASSERT(is_internal_pid(to));
+
+ if (is_immed(reason)) {
+ /* Pass signal using old monitor structure... */
+ ErtsSignal *sig;
+
+ send_using_monitor_struct:
+
+ mon->other.item = reason; /* Pass immed reason via other.item... */
+ sig = (ErtsSignal *) mon;
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR_DOWN,
+ mon->type, 0);
+ if (proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_MONITOR_DOWN))
+ return; /* receiver will destroy mon structure */
+ }
+ else {
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ Eterm from_tag, monitored, heap[3];
+
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ /*
+ * Set reason to 'undefined', since exit
+ * reason is not used for suspend monitors,
+ * and send using the monitor structure,
+ * since we don't want to trigger
+ * unnecessary memory allocation etc...
+ */
+ reason = am_undefined;
+ goto send_using_monitor_struct;
+ }
+
+ if (!(mon->flags & ERTS_ML_FLG_NAME)) {
+ from_tag = monitored = mdp->origin.other.item;
+ if (is_external_pid(from_tag)) {
+ DistEntry *dep = external_pid_dist_entry(from_tag);
+ from_tag = dep->sysname;
+ }
+ }
+ else {
+ ErtsMonitorDataExtended *mdep;
+ Eterm name, node;
+ mdep = (ErtsMonitorDataExtended *) mdp;
+ name = mdep->u.name;
+ ASSERT(is_atom(name));
+ if (mdep->dist) {
+ node = mdep->dist->nodename;
+ from_tag = node;
+ }
+ else {
+ node = erts_this_dist_entry->sysname;
+ from_tag = mdp->origin.other.item;
+ }
+ ASSERT(is_internal_port(from_tag)
+ || is_internal_pid(from_tag)
+ || is_atom(from_tag));
+ monitored = TUPLE2(&heap[0], name, node);
+ }
+ send_gen_exit_signal(NULL, from_tag, monitored,
+ to, ERTS_SIG_Q_OP_MONITOR_DOWN,
+ reason, mdp->ref, NIL, 0);
+ }
+ erts_monitor_release(mon);
+}
+
+void
+erts_proc_sig_send_dist_demonitor(Eterm to, Eterm ref)
+{
+ ErtsSigDistProcDemonitor *dmon;
+ ErtsSignal *sig;
+ Eterm *hp;
+ ErlOffHeap oh;
+ size_t size;
+
+ ERTS_INIT_OFF_HEAP(&oh);
+
+ ASSERT(is_internal_pid(to));
+
+ size = sizeof(ErtsSigDistProcDemonitor) - sizeof(Eterm);
+ ASSERT(is_ref(ref));
+ size += NC_HEAP_SIZE(ref)*sizeof(Eterm);
+
+ dmon = erts_alloc(ERTS_ALC_T_DIST_DEMONITOR, size);
+
+ hp = &dmon->heap[0];
+ dmon->ref = STORE_NC(&hp, &oh, ref);
+ sig = (ErtsSignal *) dmon;
+
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_DEMONITOR,
+ ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR,
+ 0);
+
+ if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_DEMONITOR))
+ destroy_dist_proc_demonitor(dmon);
+}
+
+void
+erts_proc_sig_send_demonitor(ErtsMonitor *mon)
+{
+ ErtsSignal *sig = (ErtsSignal *) mon;
+ Uint16 type = mon->type;
+ Eterm to = mon->other.item;
+
+ ASSERT(is_internal_pid(to));
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(!erts_monitor_is_in_table(mon));
+
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_DEMONITOR,
+ type, 0);
+
+ if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_DEMONITOR))
+ erts_monitor_release(mon);
+}
+
+int
+erts_proc_sig_send_monitor(ErtsMonitor *mon, Eterm to)
+{
+ ErtsSignal *sig = (ErtsSignal *) mon;
+ Uint16 type = mon->type;
+
+ ASSERT(is_internal_pid(to) || to == am_undefined);
+ ASSERT(erts_monitor_is_target(mon));
+
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR,
+ type, 0);
+
+ return proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_MONITOR);
+}
+
+void
+erts_proc_sig_send_trace_change(Eterm to, Uint on, Uint off, Eterm tracer)
+{
+ ErtsSigTraceInfo *ti;
+ Eterm tag;
+
+ ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
+ tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
+ ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
+ 0);
+
+ ti->common.tag = tag;
+ ti->flags_off = off;
+ ti->flags_on = on;
+ ti->tracer = NIL;
+ if (is_not_nil(tracer))
+ erts_tracer_update(&ti->tracer, tracer);
+
+ if (!proc_queue_signal(NULL, to, (ErtsSignal *) ti,
+ ERTS_SIG_Q_OP_TRACE_CHANGE_STATE))
+ destroy_trace_info(ti);
+}
+
+void
+erts_proc_sig_send_group_leader(Process *c_p, Eterm to, Eterm gl, Eterm ref)
+{
+ int res;
+ ErtsSigGroupLeader *sgl;
+ Eterm *hp;
+ Uint gl_sz, ref_sz, size;
+ erts_aint_t init_flags = ERTS_SIG_GL_FLG_ACTIVE|ERTS_SIG_GL_FLG_RECEIVER;
+ if (c_p)
+ init_flags |= ERTS_SIG_GL_FLG_SENDER;
+
+ ASSERT(c_p ? is_internal_ref(ref) : ref == NIL);
+
+ gl_sz = is_immed(gl) ? 0 : size_object(gl);
+ ref_sz = is_immed(ref) ? 0 : size_object(ref);
+
+ size = sizeof(ErtsSigGroupLeader);
+
+ size += (gl_sz + ref_sz - 1) * sizeof(Eterm);
+
+ sgl = erts_alloc(ERTS_ALC_T_SIG_DATA, size);
+
+ erts_atomic_init_nob(&sgl->flags, init_flags);
+
+ ERTS_INIT_OFF_HEAP(&sgl->oh);
+
+ hp = &sgl->heap[0];
+
+ sgl->group_leader = is_immed(gl) ? gl : copy_struct(gl, gl_sz, &hp, &sgl->oh);
+ sgl->reply_to = c_p ? c_p->common.id : NIL;
+ sgl->ref = is_immed(ref) ? ref : copy_struct(ref, ref_sz, &hp, &sgl->oh);
+
+ sgl->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_GROUP_LEADER,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+
+ res = proc_queue_signal(c_p, to, (ErtsSignal *) sgl,
+ ERTS_SIG_Q_OP_GROUP_LEADER);
+
+ if (!res)
+ destroy_sig_group_leader(sgl);
+ else if (c_p) {
+ erts_aint_t flags, rm_flags = ERTS_SIG_GL_FLG_SENDER;
+ int prio_res = maybe_elevate_sig_handling_prio(c_p, to);
+ if (!prio_res)
+ rm_flags |= ERTS_SIG_GL_FLG_ACTIVE;
+ flags = erts_atomic_read_band_nob(&sgl->flags, ~rm_flags);
+ if (!prio_res && (flags & ERTS_SIG_GL_FLG_ACTIVE))
+ res = 0; /* We deactivated signal... */
+ if ((flags & ~rm_flags) == 0)
+ destroy_sig_group_leader(sgl);
+ }
+
+ if (!res && c_p)
+ group_leader_reply(c_p, c_p->common.id, ref, 0);
+}
+
+void
+erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref)
+{
+ ErlHeapFragment *hfrag;
+ Uint hsz;
+ Eterm *hp, *start_hp, ref_cpy, msg;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ ErtsIsAliveRequest *alive_req;
+
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ hsz = ERTS_REF_THING_SIZE + 3 + sizeof(ErtsIsAliveRequest)/sizeof(Eterm);
+
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ ref_cpy = STORE_NC(&hp, ohp, ref);
+ msg = TUPLE2(hp, ref_cpy, am_false); /* default res 'false' */
+ hp += 3;
+
+ hfrag->used_size = hp - start_hp;
+
+ alive_req = (ErtsIsAliveRequest *) (char *) hp;
+ alive_req->message = msg;
+ alive_req->requester = c_p->common.id;
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_IS_ALIVE,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_IS_ALIVE))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ /* It wasn't alive; reply to ourselves... */
+ mp->next = NULL;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, mp, msg, am_system);
+ }
+}
+
+int
+erts_proc_sig_send_process_info_request(Process *c_p,
+ Eterm to,
+ int *item_ix,
+ int len,
+ int need_msgq_len,
+ int flags,
+ Uint reserve_size,
+ Eterm ref)
+{
+ Uint size = sizeof(ErtsProcessInfoSig) + (len - 1) * sizeof(int);
+ ErtsProcessInfoSig *pis = erts_alloc(ERTS_ALC_T_SIG_DATA, size);
+ int res;
+
+ ASSERT(c_p);
+ ASSERT(item_ix);
+ ASSERT(len > 0);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ pis->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_PROCESS_INFO,
+ 0, 0);
+
+ if (!need_msgq_len)
+ pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE;
+ else {
+ pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC;
+ pis->marker.common.next = NULL;
+ pis->marker.common.specific.next = NULL;
+ pis->marker.common.tag = ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK;
+ pis->marker.refc = 0;
+ pis->marker.delayed_len = 0;
+ pis->marker.len_offset = 0;
+ }
+ pis->requester = c_p->common.id;
+ sys_memcpy((void *) &pis->oref_thing,
+ (void *) internal_ref_val(ref),
+ sizeof(ErtsORefThing));
+ pis->ref = make_internal_ref((char *) &pis->oref_thing);
+ pis->reserve_size = reserve_size;
+ pis->len = len;
+ pis->flags = flags;
+ sys_memcpy((void *) &pis->item_ix[0],
+ (void *) item_ix,
+ sizeof(int)*len);
+ res = proc_queue_signal(c_p, to, (ErtsSignal *) pis,
+ ERTS_SIG_Q_OP_PROCESS_INFO);
+ if (res)
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else
+ erts_free(ERTS_ALC_T_SIG_DATA, pis);
+ return res;
+}
+
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to, Eterm tag, Eterm reply)
+{
+ ErlHeapFragment *hfrag;
+ Uint hsz, tag_sz;
+ Eterm *hp, *start_hp, tag_cpy, msg, default_reply;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ ErtsSyncSuspendRequest *ssusp;
+ int async_suspend;
+
+ tag_sz = size_object(tag);
+
+ hsz = 3 + tag_sz + sizeof(ErtsSyncSuspendRequest)/sizeof(Eterm);
+
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ tag_cpy = copy_struct(tag, tag_sz, &hp, ohp);
+
+ async_suspend = is_non_value(reply);
+ default_reply = async_suspend ? am_suspended : reply;
+
+ msg = TUPLE2(hp, tag_cpy, default_reply);
+ hp += 3;
+
+ hfrag->used_size = hp - start_hp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) hp;
+ ssusp->message = msg;
+ ssusp->requester = c_p->common.id;
+ ssusp->async = async_suspend;
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_SYNC_SUSPEND,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_SYNC_SUSPEND))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ Eterm *tp;
+ /* It wasn't alive; reply to ourselves... */
+ mp->next = NULL;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ tp = tuple_val(msg);
+ tp[2] = async_suspend ? am_badarg : am_exited;
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
+ mp, msg, am_system);
+ }
+}
+
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg)
+{
+ Eterm res;
+ ErtsProcSigRPC *sig = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigRPC));
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_RPC,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+ sig->requester = reply ? c_p->common.id : NIL;
+ sig->func = func;
+ sig->arg = arg;
+
+ if (!reply) {
+ res = am_ok;
+ sig->ref = am_ok;
+ }
+ else {
+ res = erts_make_ref(c_p);
+
+ sys_memcpy((void *) &sig->oref_thing,
+ (void *) internal_ref_val(res),
+ sizeof(ErtsORefThing));
+
+ sig->ref = make_internal_ref(&sig->oref_thing);
+
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ }
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_RPC))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ erts_free(ERTS_ALC_T_SIG_DATA, sig);
+ res = THE_NON_VALUE;
+ if (reply)
+ JOIN_MESSAGE(c_p);
+ }
+
+ return res;
+}
+
+static int
+handle_rpc(Process *c_p, ErtsProcSigRPC *rpc, int cnt, int limit, int *yieldp)
+{
+ Process *rp;
+ ErlHeapFragment *bp = NULL;
+ Eterm res;
+ Uint hsz;
+ int reds, out_cnt;
+
+ /*
+ * reds in:
+ * Reductions left.
+ *
+ * reds out:
+ * Absolute value of reds out equals consumed
+ * amount of reds. If a negative value, force
+ * a yield.
+ */
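+ /*
+ * E.g., with ERTS_SIG_REDS_CNT_FACTOR == 4 and 400 "counts"
+ * left until the limit, the callback gets a budget of 100
+ * reductions, and the reductions it reports consumed are
+ * scaled back to counts by the same factor.
+ */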
+
+ reds = (limit - cnt) / ERTS_SIG_REDS_CNT_FACTOR;
+ if (reds <= 0)
+ reds = 1;
+
+ res = (*rpc->func)(c_p, rpc->arg, &reds, &bp);
+
+ if (reds < 0) {
+ /* Force yield... */
+ *yieldp = !0;
+ reds *= -1;
+ }
+
+ out_cnt = reds*ERTS_SIG_REDS_CNT_FACTOR;
+
+ hsz = 3 + sizeof(ErtsORefThing)/sizeof(Eterm);
+
+ rp = erts_proc_lookup(rpc->requester);
+ if (!rp) {
+ if (bp)
+ free_message_buffer(bp);
+ }
+ else {
+ Eterm *hp, msg, ref;
+ ErtsMessage *mp = erts_alloc_message(hsz, &hp);
+
+ sys_memcpy((void *) hp, (void *) &rpc->oref_thing,
+ sizeof(rpc->oref_thing));
+
+ ref = make_internal_ref(hp);
+ hp += sizeof(rpc->oref_thing)/sizeof(Eterm);
+ msg = TUPLE2(hp, ref, res);
+
+ mp->hfrag.next = bp;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, rp, 0, mp, msg);
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, rpc);
+
+ return out_cnt;
+}
+
+static void
+is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive)
+{
+ /*
+ * Sender prepared the message for us. Just patch
+ * the result if necessary. The default prepared
+ * result is 'false'.
+ */
+ Process *rp;
+ ErtsIsAliveRequest *alive_req;
+
+ alive_req = (ErtsIsAliveRequest *) (char *) (&mp->hfrag.mem[0]
+ + mp->hfrag.used_size);
+
+ ASSERT(ERTS_SIG_IS_NON_MSG(mp));
+ ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
+ == ERTS_SIG_Q_OP_IS_ALIVE);
+ ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
+ ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
+ >= sizeof(ErtsIsAliveRequest));
+ ASSERT(is_internal_pid(alive_req->requester));
+ ASSERT(alive_req->requester != c_p->common.id);
+ ASSERT(is_tuple_arity(alive_req->message, 2));
+ ASSERT(is_internal_ordinary_ref(tuple_val(alive_req->message)[1]));
+ ASSERT(tuple_val(alive_req->message)[2] == am_false);
+
+ ERL_MESSAGE_TERM(mp) = alive_req->message;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ mp->next = NULL;
+
+ rp = erts_proc_lookup(alive_req->requester);
+ if (!rp)
+ erts_cleanup_messages(mp);
+ else {
+ if (is_alive) { /* patch result... */
+ Eterm *tp = tuple_val(alive_req->message);
+ tp[2] = am_true;
+ }
+ erts_queue_message(rp, 0, mp, alive_req->message, am_system);
+ }
+}
+
+static ERTS_INLINE void
+adjust_tracing_state(Process *c_p, ErtsSigRecvTracing *tracing, int setup)
+{
+ if (!IS_TRACED(c_p) || (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE)) {
+ tracing->messages.active = 0;
+ tracing->messages.receive_trace = 0;
+ tracing->messages.event = NULL;
+ tracing->messages.next = NULL;
+ tracing->procs = 0;
+ tracing->active = 0;
+ }
+ else {
+ Uint flgs = ERTS_TRACE_FLAGS(c_p);
+ int procs_trace = !!(flgs & F_TRACE_PROCS);
+ int recv_trace = !!(flgs & F_TRACE_RECEIVE);
+ /* procs tracing enabled? */
+
+ tracing->procs = procs_trace;
+
+ /* message receive tracing enabled? */
+ tracing->messages.receive_trace = recv_trace;
+ if (!recv_trace)
+ tracing->messages.event = NULL;
+ else {
+ if (tracing->messages.bp_ix < 0)
+ tracing->messages.bp_ix = erts_active_bp_ix();
+ tracing->messages.event = &erts_receive_tracing[tracing->messages.bp_ix];
+ }
+ if (setup) {
+ if (recv_trace)
+ tracing->messages.next = &c_p->sig_qs.cont;
+ else
+ tracing->messages.next = NULL;
+ }
+ tracing->messages.active = recv_trace;
+ tracing->active = recv_trace | procs_trace;
+ }
+
+#if defined(USE_VM_PROBES)
+ /* vm probe message_queued enabled? */
+
+ tracing->messages.vm_probes = DTRACE_ENABLED(message_queued);
+ if (tracing->messages.vm_probes) {
+ dtrace_proc_str(c_p, tracing->messages.receiver_name);
+ tracing->messages.active = !0;
+ tracing->active = !0;
+ if (setup && !tracing->messages.next)
+ tracing->messages.next = &c_p->sig_qs.cont;
+ }
+
+#endif
+}
+
+static ERTS_INLINE void
+setup_tracing_state(Process *c_p, ErtsSigRecvTracing *tracing)
+{
+ tracing->messages.bp_ix = -1;
+ adjust_tracing_state(c_p, tracing, !0);
+}
+
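+/*
+ * Helpers for unlinking signals from the private queues. The
+ * "inner queue" is the part already exposed to receive (tracked by
+ * sig_qs.save/sig_qs.last), while the "middle queue" is the
+ * sig_qs.cont part, which still may contain non-message signals.
+ */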
+static ERTS_INLINE void
+remove_iq_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig)
+{
+ /*
+ * Remove signal from inner queue.
+ */
+ ASSERT(c_p->sig_qs.cont_last != &sig->next);
+ ASSERT(c_p->sig_qs.nmsigs.next != &sig->next);
+ ASSERT(c_p->sig_qs.nmsigs.last != &sig->next);
+
+ if (c_p->sig_qs.save == &sig->next)
+ c_p->sig_qs.save = next_sig;
+ if (c_p->sig_qs.last == &sig->next)
+ c_p->sig_qs.last = next_sig;
+ if (c_p->sig_qs.saved_last == &sig->next)
+ c_p->sig_qs.saved_last = next_sig;
+
+ *next_sig = sig->next;
+}
+
+static ERTS_INLINE void
+remove_mq_sig(Process *c_p, ErtsMessage *sig,
+ ErtsMessage **next_sig, ErtsMessage ***next_nm_sig)
+{
+ /*
+ * Remove signal from middle queue.
+ */
+ ASSERT(c_p->sig_qs.save != &sig->next);
+ ASSERT(c_p->sig_qs.last != &sig->next);
+
+ if (c_p->sig_qs.cont_last == &sig->next)
+ c_p->sig_qs.cont_last = next_sig;
+ if (c_p->sig_qs.saved_last == &sig->next)
+ c_p->sig_qs.saved_last = next_sig;
+ if (*next_nm_sig == &sig->next)
+ *next_nm_sig = next_sig;
+ if (c_p->sig_qs.nmsigs.last == &sig->next)
+ c_p->sig_qs.nmsigs.last = next_sig;
+
+ *next_sig = sig->next;
+}
+
+static ERTS_INLINE void
+remove_nm_sig(Process *c_p, ErtsMessage *sig, ErtsMessage ***next_nm_sig)
+{
+ ErtsMessage **next_sig = *next_nm_sig;
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+ ASSERT(*next_sig == sig);
+ *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
+ remove_mq_sig(c_p, sig, next_sig, next_nm_sig);
+}
+
+static ERTS_INLINE void
+convert_to_msg(Process *c_p, ErtsMessage *sig, ErtsMessage *msg,
+ ErtsMessage ***next_nm_sig)
+{
+ ErtsMessage **next_sig = *next_nm_sig;
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+ *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
+ c_p->sig_qs.len++;
+ *next_sig = msg;
+ remove_mq_sig(c_p, sig, &msg->next, next_nm_sig);
+}
+
+static ERTS_INLINE void
+convert_to_msgs(Process *c_p, ErtsMessage *sig, Uint no_msgs,
+ ErtsMessage *first_msg, ErtsMessage *last_msg,
+ ErtsMessage ***next_nm_sig)
+{
+ ErtsMessage **next_sig = *next_nm_sig;
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+ *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
+ c_p->sig_qs.len += no_msgs;
+ *next_sig = first_msg;
+ remove_mq_sig(c_p, sig, &last_msg->next, next_nm_sig);
+}
+
+static ERTS_INLINE void
+insert_messages(Process *c_p, ErtsMessage **next, ErtsMessage *first,
+ ErtsMessage *last, Uint no_msgs, ErtsMessage ***next_nm_sig)
+{
+ last->next = *next;
+ if (c_p->sig_qs.cont_last == next)
+ c_p->sig_qs.cont_last = &last->next;
+ if (*next_nm_sig == next)
+ *next_nm_sig = &last->next;
+ if (c_p->sig_qs.nmsigs.last == next)
+ c_p->sig_qs.nmsigs.last = &last->next;
+ c_p->sig_qs.len += no_msgs;
+ *next = first;
+}
+
+static ERTS_INLINE void
+remove_mq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig, ErtsMessage ***next_nm_sig)
+{
+ /* Removing message... */
+ ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
+ c_p->sig_qs.len--;
+ remove_mq_sig(c_p, sig, next_sig, next_nm_sig);
+}
+
+static ERTS_INLINE void
+remove_iq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig)
+{
+ /* Removing message... */
+ ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
+ c_p->sig_qs.len--;
+ remove_iq_sig(c_p, sig, next_sig);
+}
+
+static ERTS_INLINE void
+convert_prepared_sig_to_msg(Process *c_p, ErtsMessage *sig, Eterm msg,
+ ErtsMessage ***next_nm_sig)
+{
+ /*
+     * Everything is already in place, except for the reference to
+     * the message and the combined hfrag marker, which need to be
+     * restored...
+ */
+ *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
+ sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ ERL_MESSAGE_TERM(sig) = msg;
+ c_p->sig_qs.len++;
+}
+
+static ERTS_INLINE int
+handle_exit_signal(Process *c_p, ErtsSigRecvTracing *tracing,
+ ErtsMessage *sig, ErtsMessage ***next_nm_sig,
+ int *exited)
+{
+ ErtsMessage *conv_msg = NULL;
+ ErtsExitSignalData *xsigd = NULL;
+ ErtsLinkData *ldp = NULL; /* Avoid erroneous warning... */
+ ErtsLink *dlnk = NULL; /* Avoid erroneous warning... */
+ Eterm tag = ((ErtsSignal *) sig)->common.tag;
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+ int op = ERTS_PROC_SIG_OP(tag);
+ int destroy = 0;
+ int ignore = 0;
+ int save = 0;
+ int exit = 0;
+ int cnt = 1;
+ Eterm reason;
+ Eterm from;
+
+ if (type == ERTS_SIG_Q_TYPE_GEN_EXIT) {
+ xsigd = get_exit_signal_data(sig);
+ from = xsigd->from;
+ reason = xsigd->reason;
+ if (op != ERTS_SIG_Q_OP_EXIT_LINKED)
+ ignore = 0;
+ else {
+ ErtsLink *llnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p), from);
+ if (!llnk) {
+ /* Link no longer active; ignore... */
+ ignore = !0;
+ destroy = !0;
+ }
+ else {
+ ignore = 0;
+ erts_link_tree_delete(&ERTS_P_LINKS(c_p), llnk);
+ if (llnk->type != ERTS_LNK_TYPE_DIST_PROC)
+ erts_link_release(llnk);
+ else {
+ dlnk = erts_link_to_other(llnk, &ldp);
+ if (erts_link_dist_delete(dlnk))
+ erts_link_release_both(ldp);
+ else
+ erts_link_release(llnk);
+ }
+ }
+ }
+
+ if (!ignore) {
+
+ if ((op != ERTS_SIG_Q_OP_EXIT || reason != am_kill)
+ && (c_p->flags & F_TRAP_EXIT)) {
+ convert_prepared_sig_to_msg(c_p, sig,
+ xsigd->message, next_nm_sig);
+ conv_msg = sig;
+ }
+ else if (reason == am_normal && !xsigd->u.normal_kills) {
+ /* Ignore it... */
+ destroy = !0;
+ ignore = !0;
+ }
+ else {
+ /* Terminate... */
+ save = !0;
+ exit = !0;
+ if (op == ERTS_SIG_Q_OP_EXIT && reason == am_kill)
+ reason = am_killed;
+ }
+ }
+ }
+ else { /* Link exit */
+ ErtsLink *slnk = (ErtsLink *) sig;
+ ErtsLink *llnk = erts_link_to_other(slnk, &ldp);
+
+ ASSERT(type == ERTS_LNK_TYPE_PROC
+ || type == ERTS_LNK_TYPE_PORT
+ || type == ERTS_LNK_TYPE_DIST_PROC);
+
+ from = llnk->other.item;
+ reason = slnk->other.item; /* reason in other.item ... */
+ ASSERT(is_pid(from) || is_internal_port(from));
+ ASSERT(is_immed(reason));
+ ASSERT(op == ERTS_SIG_Q_OP_EXIT_LINKED);
+ dlnk = erts_link_tree_key_delete(&ERTS_P_LINKS(c_p), llnk);
+ if (!dlnk) {
+ ignore = !0; /* Link no longer active; ignore... */
+ ldp = NULL;
+ }
+ else {
+ Eterm pid;
+ ErtsMessage *mp;
+ ErtsProcLocks locks;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ ignore = 0;
+ if (dlnk == llnk)
+ dlnk = NULL;
+ else
+ ldp = NULL;
+
+ ASSERT(is_immed(reason));
+
+ if (!(c_p->flags & F_TRAP_EXIT)) {
+ if (reason == am_normal)
+ ignore = !0; /* Ignore it... */
+ else
+ exit = !0; /* Terminate... */
+ }
+ else {
+
+ /*
+                 * Create an 'EXIT' message and replace
+ * the original signal with the message...
+ */
+
+ locks = ERTS_PROC_LOCK_MAIN;
+
+ hsz = 4 + NC_HEAP_SIZE(from);
+
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+
+ if (locks != ERTS_PROC_LOCK_MAIN)
+ erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+
+ pid = STORE_NC(&hp, ohp, from);
+
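+                /* The resulting message is {'EXIT', Pid, Reason} */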
+ ERL_MESSAGE_TERM(mp) = TUPLE3(hp, am_EXIT, pid, reason);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ if (is_immed(pid))
+ ERL_MESSAGE_FROM(mp) = pid;
+ else {
+ DistEntry *dep;
+ ASSERT(is_external_pid(pid));
+ dep = external_pid_dist_entry(pid);
+ ERL_MESSAGE_FROM(mp) = dep->sysname;
+ }
+
+ /* Replace original signal with the exit message... */
+ convert_to_msg(c_p, sig, mp, next_nm_sig);
+
+ cnt += 4;
+
+ conv_msg = mp;
+ }
+ }
+ destroy = !0;
+ }
+
+ if (ignore|exit) {
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ if (exit) {
+ if (save) {
+ sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ ERL_MESSAGE_TERM(sig) = xsigd->message;
+ erts_save_message_in_proc(c_p, sig);
+ }
+ /* Exit process... */
+ erts_set_self_exiting(c_p, reason);
+
+ cnt++;
+ }
+ }
+
+ if (!exit) {
+ if (conv_msg)
+ erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
+ if (op == ERTS_SIG_Q_OP_EXIT_LINKED && tracing->procs)
+ getting_unlinked(c_p, from);
+ }
+
+ if (destroy) {
+ cnt++;
+ if (type == ERTS_SIG_Q_TYPE_GEN_EXIT) {
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
+ }
+ else {
+ if (ldp)
+ erts_link_release_both(ldp);
+ else {
+ if (dlnk)
+ erts_link_release(dlnk);
+ erts_link_release((ErtsLink *) sig);
+ }
+ }
+ }
+
+ *exited = exit;
+
+ return cnt;
+}
+
+static ERTS_INLINE int
+convert_prepared_down_message(Process *c_p, ErtsMessage *sig,
+ Eterm msg, ErtsMessage ***next_nm_sig)
+{
+ convert_prepared_sig_to_msg(c_p, sig, msg, next_nm_sig);
+ erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
+ return 1;
+}
+
+static int
+convert_to_down_message(Process *c_p,
+ ErtsMessage *sig,
+ ErtsMonitorData *mdp,
+ Uint16 mon_type,
+ ErtsMessage ***next_nm_sig)
+{
+ /*
+ * Create a 'DOWN' message and replace the signal
+ * with it...
+ */
+ int cnt = 0;
+ Eterm node = am_undefined;
+ ErtsMessage *mp;
+ ErtsProcLocks locks;
+ Uint hsz;
+ Eterm *hp, ref, from, type, reason;
+ ErlOffHeap *ohp;
+
+ ASSERT(mdp);
+ ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
+ == (mdp->target.flags & ERTS_ML_FLGS_SAME));
+
+ hsz = 6; /* 5-tuple */
+
+ if (mdp->origin.flags & ERTS_ML_FLG_NAME)
+ hsz += 3; /* reg name 2-tuple */
+ else {
+ ASSERT(is_pid(mdp->origin.other.item)
+ || is_internal_port(mdp->origin.other.item));
+ hsz += NC_HEAP_SIZE(mdp->origin.other.item);
+ }
+
+ ASSERT(is_ref(mdp->ref));
+ hsz += NC_HEAP_SIZE(mdp->ref);
+
+ locks = ERTS_PROC_LOCK_MAIN;
+
+ /* reason is mdp->target.other.item */
+ reason = mdp->target.other.item;
+ ASSERT(is_immed(reason));
+
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+
+ if (locks != ERTS_PROC_LOCK_MAIN)
+ erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+
+ cnt += 4;
+
+ ref = STORE_NC(&hp, ohp, mdp->ref);
+
+ if (!(mdp->origin.flags & ERTS_ML_FLG_NAME)) {
+ from = STORE_NC(&hp, ohp, mdp->origin.other.item);
+ }
+ else {
+ ErtsMonitorDataExtended *mdep;
+ ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
+ mdep = (ErtsMonitorDataExtended *) mdp;
+ ASSERT(is_atom(mdep->u.name));
+ if (mdep->dist)
+ node = mdep->dist->nodename;
+ else
+ node = erts_this_dist_entry->sysname;
+ from = TUPLE2(hp, mdep->u.name, node);
+ hp += 3;
+ }
+
+ ASSERT(mdp->origin.type == mon_type);
+ switch (mon_type) {
+ case ERTS_MON_TYPE_PORT:
+ type = am_port;
+ if (mdp->origin.other.item == am_undefined) {
+ /* failed by name... */
+ ERL_MESSAGE_FROM(mp) = am_system;
+ }
+ else {
+ ASSERT(is_internal_port(mdp->origin.other.item));
+ ERL_MESSAGE_FROM(mp) = mdp->origin.other.item;
+ }
+ break;
+ case ERTS_MON_TYPE_PROC:
+ type = am_process;
+ if (mdp->origin.other.item == am_undefined) {
+ /* failed by name... */
+ ERL_MESSAGE_FROM(mp) = am_system;
+ }
+ else {
+ ASSERT(is_internal_pid(mdp->origin.other.item));
+ ERL_MESSAGE_FROM(mp) = mdp->origin.other.item;
+ }
+ break;
+ case ERTS_MON_TYPE_DIST_PROC:
+ type = am_process;
+ if (node == am_undefined) {
+ ErtsMonitorDataExtended *mdep;
+ ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
+ mdep = (ErtsMonitorDataExtended *) mdp;
+ ASSERT(mdep->dist);
+ node = mdep->dist->nodename;
+ }
+ ASSERT(is_atom(node) && node != am_undefined);
+ ERL_MESSAGE_FROM(mp) = node;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected monitor type");
+ type = am_undefined;
+ ERL_MESSAGE_FROM(mp) = am_undefined;
+ break;
+ }
+
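+    /* The resulting message is {'DOWN', Ref, Type, From, Reason} */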
+ ERL_MESSAGE_TERM(mp) = TUPLE5(hp, am_DOWN, ref,
+ type, from, reason);
+ hp += 6;
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ /* Replace original signal with the exit message... */
+ convert_to_msg(c_p, sig, mp, next_nm_sig);
+
+ cnt += 4;
+
+ erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
+
+ return cnt;
+}
+
+static ERTS_INLINE int
+convert_to_nodedown_messages(Process *c_p,
+ ErtsMessage *sig,
+ ErtsMonitorData *mdp,
+ ErtsMessage ***next_nm_sig)
+{
+ int cnt = 1;
+ Uint n;
+ ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
+
+ ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
+ == (mdp->target.flags & ERTS_ML_FLGS_SAME));
+ ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
+
+ n = mdep->u.refc;
+
+ if (n == 0)
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ else {
+ Uint i;
+ ErtsMessage *nd_first = NULL;
+ ErtsMessage *nd_last = NULL;
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
+ Eterm node = mdep->dist->nodename;
+
+ ASSERT(is_atom(node));
+ ASSERT(n > 0);
+
+ for (i = 0; i < n; i++) {
+ ErtsMessage *mp;
+ ErlOffHeap *ohp;
+ Eterm *hp;
+
+ mp = erts_alloc_message_heap(c_p, &locks, 3, &hp, &ohp);
+
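+            /* Each message is {nodedown, Node} */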
+ ERL_MESSAGE_TERM(mp) = TUPLE2(hp, am_nodedown, node);
+ ERL_MESSAGE_FROM(mp) = am_system;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ mp->next = nd_first;
+ nd_first = mp;
+ if (!nd_last)
+ nd_last = mp;
+ cnt++;
+ }
+
+ if (locks != ERTS_PROC_LOCK_MAIN)
+ erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+
+ /* Replace signal with 'nodedown' messages */
+ convert_to_msgs(c_p, sig, n, nd_first, nd_last, next_nm_sig);
+
+ erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ return cnt;
+}
+
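+/*
+ * A monitor entry in the monitor tree may head a list of additional
+ * node monitors for the same key (kept in mdep->uptr.node_monitors).
+ * If the monitor that owns the tree entry triggers while such a list
+ * exists, one of the listed monitors is promoted to take its place in
+ * the tree before the signal is converted into 'nodedown' messages.
+ */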
+static int
+handle_nodedown(Process *c_p,
+ ErtsMessage *sig,
+ ErtsMonitorData *mdp,
+ ErtsMessage ***next_nm_sig)
+{
+ ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
+ ErtsMonitor *omon = &mdp->origin;
+ int not_in_subtab = !(omon->flags & ERTS_ML_FLG_IN_SUBTABLE);
+ int cnt = 1;
+
+ ASSERT(erts_monitor_is_in_table(omon));
+
+ if (not_in_subtab & !mdep->uptr.node_monitors)
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), omon);
+ else if (not_in_subtab) {
+ ErtsMonitor *sub_mon;
+ ErtsMonitorDataExtended *sub_mdep;
+ sub_mon = erts_monitor_list_last(mdep->uptr.node_monitors);
+ ASSERT(sub_mon);
+ erts_monitor_list_delete(&mdep->uptr.node_monitors, sub_mon);
+ sub_mon->flags &= ~ERTS_ML_FLG_IN_SUBTABLE;
+ sub_mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(sub_mon);
+ ASSERT(!sub_mdep->uptr.node_monitors);
+ sub_mdep->uptr.node_monitors = mdep->uptr.node_monitors;
+ mdep->uptr.node_monitors = NULL;
+ erts_monitor_tree_replace(&ERTS_P_MONITORS(c_p), omon, sub_mon);
+ cnt += 2;
+ }
+ else {
+ ErtsMonitorDataExtended *top_mdep;
+ ErtsMonitor *top_mon;
+ ASSERT(is_atom(omon->other.item));
+ ASSERT(!mdep->uptr.node_monitors);
+ top_mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
+ omon->other.item);
+ ASSERT(top_mon);
+ top_mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(top_mon);
+ ASSERT(top_mdep->uptr.node_monitors);
+ erts_monitor_list_delete(&top_mdep->uptr.node_monitors, omon);
+ omon->flags &= ~ERTS_ML_FLG_IN_SUBTABLE;
+ cnt += 3;
+ }
+
+ return cnt + convert_to_nodedown_messages(c_p, sig, mdp, next_nm_sig);
+}
+
+static void
+handle_persistent_mon_msg(Process *c_p, Uint16 type,
+ ErtsMonitor *mon, ErtsMessage *sig,
+ Eterm msg, ErtsMessage ***next_nm_sig)
+{
+ convert_prepared_sig_to_msg(c_p, sig, msg, next_nm_sig);
+
+ switch (type) {
+
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ ASSERT(mon->type == ERTS_MON_TYPE_TIME_OFFSET);
+ break;
+
+ case ERTS_MON_TYPE_NODES: {
+ ErtsMonitorDataExtended *mdep;
+ Uint n;
+ ASSERT(mon->type == ERTS_MON_TYPE_NODES);
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ ERTS_ML_ASSERT(mdep->u.refc > 0);
+ n = mdep->u.refc;
+ n--;
+ if (n > 0) {
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
+ ErtsMessage *first = NULL, *prev, *last;
+ Uint hsz = size_object(msg);
+ Uint i;
+
+ for (i = 0; i < n; i++) {
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ last = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+
+ if (!first)
+ first = last;
+ else
+ prev->next = last;
+ prev = last;
+
+ ERL_MESSAGE_TERM(last) = copy_struct(msg, hsz, &hp, ohp);
+
+#ifdef USE_VM_PROBES
+ ASSERT(is_immed(ERL_MESSAGE_DT_UTAG(sig)));
+ ERL_MESSAGE_DT_UTAG(last) = ERL_MESSAGE_DT_UTAG(sig);
+#endif
+ ASSERT(is_immed(ERL_MESSAGE_TOKEN(sig)));
+ ERL_MESSAGE_TOKEN(last) = ERL_MESSAGE_TOKEN(sig);
+ ASSERT(is_immed(ERL_MESSAGE_FROM(sig)));
+ ERL_MESSAGE_FROM(last) = ERL_MESSAGE_FROM(sig);
+
+ }
+ if (locks != ERTS_PROC_LOCK_MAIN)
+ erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ insert_messages(c_p, &sig->next, first, last, n, next_nm_sig);
+ }
+ break;
+ }
+
+ default:
+ ERTS_INTERNAL_ERROR("Invalid type");
+ break;
+ }
+
+ erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
+}
+
+static void
+group_leader_reply(Process *c_p, Eterm to, Eterm ref, int success)
+{
+ Process *rp = erts_proc_lookup(to);
+
+ if (rp) {
+ ErtsProcLocks locks;
+ Uint sz;
+ Eterm *hp, msg, ref_cpy, result;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ ASSERT(is_internal_ref(ref));
+
+ locks = c_p == rp ? ERTS_PROC_LOCK_MAIN : 0;
+ sz = size_object(ref);
+
+ mp = erts_alloc_message_heap(rp, &locks, sz+3,
+ &hp, &ohp);
+
+ ref_cpy = copy_struct(ref, sz, &hp, ohp);
+ result = success ? am_true : am_badarg;
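+        /* The reply is {Ref, true} on success and {Ref, badarg} otherwise */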
+ msg = TUPLE2(hp, ref_cpy, result);
+
+ erts_queue_message(rp, locks, mp, msg, am_system);
+
+ if (c_p == rp)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ }
+}
+
+static void
+handle_group_leader(Process *c_p, ErtsSigGroupLeader *sgl)
+{
+ erts_aint_t flags;
+
+ flags = erts_atomic_read_band_nob(&sgl->flags, ~ERTS_SIG_GL_FLG_ACTIVE);
+ if (flags & ERTS_SIG_GL_FLG_ACTIVE) {
+ int res = erts_set_group_leader(c_p, sgl->group_leader);
+ if (is_internal_pid(sgl->reply_to))
+ group_leader_reply(c_p, sgl->reply_to, sgl->ref, res);
+ }
+
+ flags = erts_atomic_read_band_nob(&sgl->flags, ~ERTS_SIG_GL_FLG_RECEIVER);
+ if ((flags & ~ERTS_SIG_GL_FLG_RECEIVER) == 0)
+ destroy_sig_group_leader(sgl);
+}
+
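+/*
+ * A process_info() request signal that needs an up-to-date message
+ * queue length is paired with a length-offset marker kept first in
+ * the outer signal queue. The marker tracks how much of the queue
+ * length is still only accounted for in the outer queue, so that
+ * sig_qs.len can be adjusted when the request eventually is handled
+ * (see handle_process_info()). The marker is reference counted, as
+ * multiple outstanding requests may share it.
+ */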
+static void
+check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig)
+{
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+
+ ASSERT(ERTS_PROC_SIG_OP(sig->common.tag) == ERTS_SIG_Q_OP_PROCESS_INFO);
+
+ if (pisig->msgq_len_offset == ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC) {
+ ErtsProcSigMsgQLenOffsetMarker *mrkr;
+ Sint len, msgq_len_offset;
+ ErtsMessage *first = rp->sig_inq.first;
+ ASSERT(first);
+ if (((ErtsSignal *) first)->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK)
+ mrkr = (ErtsProcSigMsgQLenOffsetMarker *) first;
+ else {
+ mrkr = &pisig->marker;
+
+ ASSERT(mrkr->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ mrkr->common.next = first;
+ ASSERT(rp->sig_inq.last != &rp->sig_inq.first);
+ if (rp->sig_inq.nmsigs.next == &rp->sig_inq.first)
+ rp->sig_inq.nmsigs.next = &mrkr->common.next;
+ if (rp->sig_inq.nmsigs.last == &rp->sig_inq.first)
+ rp->sig_inq.nmsigs.last = &mrkr->common.next;
+ rp->sig_inq.first = (ErtsMessage *) mrkr;
+ }
+
+ len = rp->sig_inq.len;
+ msgq_len_offset = len - mrkr->len_offset;
+
+ mrkr->len_offset = len;
+ mrkr->refc++;
+
+ pisig->msgq_len_offset = msgq_len_offset;
+
+#ifdef DEBUG
+ /* save pointer to used marker... */
+ pisig->marker.common.specific.attachment = (void *) mrkr;
+#endif
+
+ }
+}
+
+static void
+destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig)
+{
+ int dealloc_pisig = !0;
+
+ if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
+ Sint refc;
+ int dealloc_marker = 0;
+ ErtsProcSigMsgQLenOffsetMarker *marker;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ Sint delayed_len;
+#endif
+
+ ASSERT(pisig->msgq_len_offset >= 0);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
+ ASSERT(marker);
+ ASSERT(marker->refc > 0);
+ ASSERT(pisig->marker.common.specific.attachment == (void *) marker);
+
+ marker->delayed_len -= pisig->msgq_len_offset;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ delayed_len = marker->delayed_len;
+#endif
+
+ refc = --marker->refc;
+ if (refc) {
+ if (marker == &pisig->marker) {
+ /* Another signal using our marker... */
+ dealloc_pisig = 0;
+ }
+ }
+ else {
+ /* Marker unused; remove it... */
+ ASSERT(marker->delayed_len + marker->len_offset == 0);
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ delayed_len += marker->len_offset;
+#endif
+ if (marker != &pisig->marker)
+                dealloc_marker = !0; /* used another signal's marker... */
+ c_p->sig_inq.first = marker->common.next;
+ if (c_p->sig_inq.last == &marker->common.next)
+ c_p->sig_inq.last = &c_p->sig_inq.first;
+ if (c_p->sig_inq.nmsigs.next == &marker->common.next)
+ c_p->sig_inq.nmsigs.next = &c_p->sig_inq.first;
+ if (c_p->sig_inq.nmsigs.last == &marker->common.next)
+ c_p->sig_inq.nmsigs.last = &c_p->sig_inq.first;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ if (!refc) {
+ c_p->flags &= ~F_DELAYED_PSIGQS_LEN;
+ /* Adjust msg len of inner+middle queue */
+ ASSERT(marker->len_offset <= 0);
+ c_p->sig_qs.len -= marker->len_offset;
+
+ ASSERT(c_p->sig_qs.len >= 0);
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ c_p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(c_p->sig_qs.len + delayed_len == len);
+ }
+#endif
+
+
+ if (dealloc_marker) {
+ ErtsProcessInfoSig *pisig2
+ = (ErtsProcessInfoSig *) (((char *) marker)
+ - offsetof(ErtsProcessInfoSig,
+ marker));
+ erts_free(ERTS_ALC_T_SIG_DATA, pisig2);
+ }
+ }
+
+ if (dealloc_pisig)
+ erts_free(ERTS_ALC_T_SIG_DATA, pisig);
+}
+
+static int
+handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing,
+ ErtsMessage *sig, ErtsMessage ***next_nm_sig,
+ int is_alive)
+{
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+ Uint reds = 0;
+ Process *rp;
+
+ ASSERT(!!is_alive == !(erts_atomic32_read_nob(&c_p->state)
+ & ERTS_PSFLG_EXITING));
+
+ if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
+ /*
+ * Request requires message queue data to be updated
+ * before inspection...
+ */
+
+ ASSERT(pisig->msgq_len_offset >= 0);
+ /*
+ * Update sig_qs.len to reflect the length
+ * of the message queue...
+ */
+ c_p->sig_qs.len += pisig->msgq_len_offset;
+
+ if (is_alive) {
+ /*
+ * Move messages part of message queue into inner
+ * signal queue...
+ */
+ ASSERT(tracing);
+
+ if (*next_nm_sig != &c_p->sig_qs.cont) {
+ if (*next_nm_sig == tracing->messages.next)
+ tracing->messages.next = &c_p->sig_qs.cont;
+ *c_p->sig_qs.last = c_p->sig_qs.cont;
+ c_p->sig_qs.last = *next_nm_sig;
+
+ c_p->sig_qs.cont = **next_nm_sig;
+ if (c_p->sig_qs.nmsigs.last == *next_nm_sig)
+ c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
+ *next_nm_sig = &c_p->sig_qs.cont;
+ *c_p->sig_qs.last = NULL;
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len;
+ ErtsMessage *mp;
+ for (mp = c_p->sig_qs.first, len = 0; mp; mp = mp->next) {
+ ERTS_ASSERT(ERTS_SIG_IS_MSG(mp));
+ len++;
+ }
+ ERTS_ASSERT(c_p->sig_qs.len == len);
+ }
+#endif
+ }
+ }
+ if (is_alive) {
+ if (!pisig->common.specific.next) {
+ /*
+             * No more signals in the middle queue...
+             *
+             * Process-info 'status' needs the sig-q
+             * process flag to be updated in order
+             * to show an accurate result...
+ */
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ }
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ }
+
+ rp = erts_proc_lookup(pisig->requester);
+ ASSERT(c_p != rp);
+ if (rp) {
+ Eterm msg, res, ref, *hp;
+ ErtsProcLocks locks = 0;
+ ErtsHeapFactory hfact;
+ ErtsMessage *mp;
+ Uint reserve_size = 3 + sizeof(pisig->oref_thing)/sizeof(Eterm);
+
+ if (!is_alive) {
+ ErlOffHeap *ohp;
+ mp = erts_alloc_message_heap(rp, &locks, reserve_size, &hp, &ohp);
+ res = am_undefined;
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ reserve_size += pisig->reserve_size;
+
+ mp = erts_alloc_message(0, NULL);
+ hfrag = new_message_buffer(reserve_size);
+ mp->data.heap_frag = hfrag;
+ erts_factory_selfcontained_message_init(&hfact, mp, &hfrag->mem[0]);
+
+ res = erts_process_info(c_p, &hfact, c_p, ERTS_PROC_LOCK_MAIN,
+ pisig->item_ix, pisig->len,
+ pisig->flags, reserve_size, &reds);
+
+ hp = erts_produce_heap(&hfact,
+ 3 + sizeof(pisig->oref_thing)/sizeof(Eterm),
+ 0);
+ }
+
+ sys_memcpy((void *) hp, (void *) &pisig->oref_thing,
+ sizeof(pisig->oref_thing));
+ ref = make_internal_ref(hp);
+ hp += sizeof(pisig->oref_thing)/sizeof(Eterm);
+
+ msg = TUPLE2(hp, ref, res);
+
+ if (is_alive)
+ erts_factory_trim_and_close(&hfact, &msg, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, rp, locks, mp, msg);
+
+ if (!is_alive && locks)
+ erts_proc_unlock(rp, locks);
+ }
+
+ destroy_process_info_request(c_p, pisig);
+
+ if (reds > INT_MAX/8)
+ reds = INT_MAX/8;
+
+ return ((int) reds)*4 + 8;
+}
+
+static void
+handle_suspend(Process *c_p, ErtsMonitor *mon, int *yieldp)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+
+ if (!(state & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ *yieldp = !0;
+ }
+ else {
+ /* Executing dirty; delay suspend... */
+ ErtsProcSigPendingSuspend *psusp;
+ ErtsMonitorSuspend *msp;
+
+ psusp = ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp) {
+ psusp = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigPendingSuspend));
+ psusp->mon = NULL;
+ psusp->sync = NULL;
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, (void *) psusp);
+ }
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+
+ msp->next = psusp->mon;
+ psusp->mon = msp;
+
+ erts_atomic32_inc_nob(&msp->md.refc);
+ }
+}
+
+static void
+sync_suspend_reply(Process *c_p, ErtsMessage *mp, erts_aint32_t state)
+{
+ /*
+ * Sender prepared the message for us. Just patch
+ * the result if necessary. The default prepared
+ * result is 'false'.
+ */
+ Process *rp;
+ ErtsSyncSuspendRequest *ssusp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) (&mp->hfrag.mem[0]
+ + mp->hfrag.used_size);
+
+ ASSERT(ERTS_SIG_IS_NON_MSG(mp));
+ ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
+ == ERTS_SIG_Q_OP_SYNC_SUSPEND);
+ ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
+ ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
+ >= sizeof(ErtsSyncSuspendRequest));
+ ASSERT(is_internal_pid(ssusp->requester));
+ ASSERT(ssusp->requester != c_p->common.id);
+ ASSERT(is_tuple_arity(ssusp->message, 2));
+ ASSERT(is_immed(tuple_val(ssusp->message)[2]));
+
+ ERL_MESSAGE_TERM(mp) = ssusp->message;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ mp->next = NULL;
+
+ rp = erts_proc_lookup(ssusp->requester);
+ if (!rp)
+ erts_cleanup_messages(mp);
+ else {
+ if ((state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SUSPENDED)) != ERTS_PSFLG_SUSPENDED) {
+ /* Not suspended -> patch result... */
+ if (state & ERTS_PSFLG_EXITING) {
+ Eterm *tp = tuple_val(ssusp->message);
+ tp[2] = ssusp->async ? am_exited : am_badarg;
+ }
+ else {
+ Eterm *tp = tuple_val(ssusp->message);
+ ASSERT(!(state & ERTS_PSFLG_SUSPENDED));
+ tp[2] = ssusp->async ? am_not_suspended : am_internal_error;
+ }
+ }
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, rp, 0, mp, ssusp->message);
+ }
+}
+
+static void
+handle_sync_suspend(Process *c_p, ErtsMessage *mp)
+{
+ ErtsProcSigPendingSuspend *psusp;
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp)
+ sync_suspend_reply(c_p, mp, erts_atomic32_read_nob(&c_p->state));
+ else {
+ mp->next = psusp->sync;
+ psusp->sync = mp;
+ }
+}
+
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p)
+{
+ ErtsMonitorSuspend *msp;
+ ErtsMessage *sync;
+ ErtsProcSigPendingSuspend *psusp;
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+
+ msp = psusp->mon;
+
+ while (msp) {
+ ErtsMonitorSuspend *next_msp = msp->next;
+ msp->next = NULL;
+ if (!(state & ERTS_PSFLG_EXITING)
+ && erts_monitor_is_in_table(&msp->md.target)) {
+ erts_aint_t mstate;
+
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+
+ erts_monitor_release(&msp->md.target);
+
+ msp = next_msp;
+ }
+
+ sync = psusp->sync;
+
+ while (sync) {
+ ErtsMessage *next_sync = sync->next;
+ sync->next = NULL;
+ sync_suspend_reply(c_p, sync, state);
+ sync = next_sync;
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, psusp);
+
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, NULL);
+}
+
+/*
+ * Called in order to handle incoming signals.
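+ *
+ * On entry, *redsp holds the reduction budget for this call; on
+ * return, it holds the number of reductions consumed. A non-zero
+ * value is returned when all signals in the middle queue have been
+ * handled, and zero when signals remain to be handled. *statep is
+ * updated with the current process state.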
+ */
+
+int
+erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
+ int *redsp, int max_reds, int local_only)
+{
+ Eterm tag;
+ erts_aint32_t state;
+ int yield, cnt, limit, abs_lim, msg_tracing;
+ ErtsMessage *sig, ***next_nm_sig;
+ ErtsSigRecvTracing tracing;
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+
+ state = erts_atomic32_read_nob(&c_p->state);
+ if (!local_only) {
+ if (ERTS_PSFLG_SIG_IN_Q & state) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
+ }
+
+ limit = *redsp;
+ *redsp = 0;
+ yield = 0;
+
+ if (!c_p->sig_qs.cont) {
+ *statep = state;
+ return !0;
+ }
+
+ if (state & ERTS_PSFLG_EXITING) {
+ *statep = state;
+ return 0;
+ }
+
+ next_nm_sig = &c_p->sig_qs.nmsigs.next;
+
+ setup_tracing_state(c_p, &tracing);
+ msg_tracing = tracing.messages.active;
+
+ limit *= ERTS_SIG_REDS_CNT_FACTOR;
+ abs_lim = ERTS_SIG_REDS_CNT_FACTOR*max_reds;
+ if (limit > abs_lim)
+ limit = abs_lim;
+
+ cnt = 0;
+
+ do {
+
+ if (msg_tracing) {
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ if (handle_msg_tracing(c_p, &tracing, next_nm_sig) != 0) {
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break; /* tracing limit or end... */
+ }
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ }
+
+ if (!*next_nm_sig)
+ break;
+
+ sig = **next_nm_sig;
+
+ ASSERT(sig);
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ tag = ((ErtsSignal *) sig)->common.tag;
+
+ switch (ERTS_PROC_SIG_OP(tag)) {
+
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED: {
+ int exited;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ cnt += handle_exit_signal(c_p, &tracing, sig,
+ next_nm_sig, &exited);
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ if (exited)
+ goto stop; /* terminated by signal */
+ /* ignored or converted to exit message... */
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_MONITOR_DOWN: {
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+ ErtsExitSignalData *xsigd = NULL;
+ ErtsMonitorData *mdp = NULL;
+ ErtsMonitor *omon = NULL, *tmon = NULL;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ switch (type) {
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ tmon = (ErtsMonitor *) sig;
+ ASSERT(erts_monitor_is_target(tmon));
+ ASSERT(!erts_monitor_is_in_table(tmon));
+ mdp = erts_monitor_to_data(tmon);
+ if (erts_monitor_is_in_table(&mdp->origin)) {
+ omon = &mdp->origin;
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p),
+ omon);
+ cnt += convert_to_down_message(c_p, sig, mdp,
+ type, next_nm_sig);
+ }
+ break;
+ case ERTS_SIG_Q_TYPE_GEN_EXIT:
+ xsigd = get_exit_signal_data(sig);
+ omon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
+ xsigd->u.ref);
+ if (omon) {
+ ASSERT(erts_monitor_is_origin(omon));
+ if (omon->type == ERTS_MON_TYPE_DIST_PROC) {
+ mdp = erts_monitor_to_data(omon);
+ if (erts_monitor_dist_delete(&mdp->target))
+ tmon = &mdp->target;
+ }
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p),
+ omon);
+ cnt += convert_prepared_down_message(c_p, sig,
+ xsigd->message,
+ next_nm_sig);
+ }
+ break;
+ case ERTS_MON_TYPE_NODE:
+ tmon = (ErtsMonitor *) sig;
+ ASSERT(erts_monitor_is_target(tmon));
+ ASSERT(!erts_monitor_is_in_table(tmon));
+ mdp = erts_monitor_to_data(tmon);
+ if (erts_monitor_is_in_table(&mdp->origin)) {
+ omon = &mdp->origin;
+ cnt += handle_nodedown(c_p, sig, mdp, next_nm_sig);
+ }
+ break;
+ case ERTS_MON_TYPE_SUSPEND:
+ tmon = (ErtsMonitor *) sig;
+ ASSERT(erts_monitor_is_target(tmon));
+ ASSERT(!erts_monitor_is_in_table(tmon));
+ mdp = erts_monitor_to_data(tmon);
+ if (erts_monitor_is_in_table(&mdp->origin)) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p),
+ &mdp->origin);
+ omon = &mdp->origin;
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ }
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("invalid monitor type");
+ break;
+ }
+
+ if (omon) {
+ if (tmon)
+ erts_monitor_release_both(mdp);
+ else
+ erts_monitor_release(omon);
+ }
+ else {
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ if (xsigd) {
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
+ }
+ if (tmon)
+ erts_monitor_release(tmon);
+ }
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG: {
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+ ErtsMonitor *mon;
+ Eterm msg;
+ Eterm key;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ key = get_persist_mon_msg(sig, &msg);
+
+ cnt++;
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), key);
+ if (mon) {
+ ASSERT(erts_monitor_is_origin(mon));
+ handle_persistent_mon_msg(c_p, type, mon, sig,
+ msg, next_nm_sig);
+ }
+ else {
+ cnt++;
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
+ }
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_MONITOR: {
+ ErtsMonitor *mon = (ErtsMonitor *) sig;
+
+ ASSERT(erts_monitor_is_target(mon));
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+
+ if (mon->type == ERTS_MON_TYPE_DIST_PROC)
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), mon);
+ else {
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(c_p), mon);
+ if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ handle_suspend(c_p, mon, &yield);
+ }
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ cnt += 2;
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_DEMONITOR: {
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+
+ if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
+ ErtsMonitor *tmon;
+ ErtsSigDistProcDemonitor *dmon;
+ dmon = (ErtsSigDistProcDemonitor *) sig;
+ tmon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), dmon->ref);
+ destroy_dist_proc_demonitor(dmon);
+ cnt++;
+ if (tmon) {
+ ErtsMonitorData *mdp = erts_monitor_to_data(tmon);
+ ASSERT(erts_monitor_is_target(tmon));
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ erts_monitor_release(tmon);
+ else
+ erts_monitor_release_both(mdp);
+ cnt += 2;
+ }
+ }
+ else {
+ ErtsMonitor *omon = (ErtsMonitor *) sig;
+ ErtsMonitorData *mdp = erts_monitor_to_data(omon);
+ ASSERT(omon->type == type);
+ ASSERT(erts_monitor_is_origin(omon));
+ ASSERT(!erts_monitor_is_in_table(omon));
+ if (!erts_monitor_is_in_table(&mdp->target))
+ erts_monitor_release(omon);
+ else {
+ ErtsMonitor *tmon = &mdp->target;
+ ASSERT(tmon->type == type);
+ if (type == ERTS_MON_TYPE_DIST_PROC)
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
+ else {
+ erts_monitor_list_delete(&ERTS_P_LT_MONITORS(c_p), tmon);
+ switch (type) {
+ case ERTS_MON_TYPE_RESOURCE:
+ erts_nif_demonitored((ErtsResource *) tmon->other.ptr);
+ cnt++;
+ break;
+ case ERTS_MON_TYPE_SUSPEND:
+ erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
+ break;
+ default:
+ break;
+ }
+ }
+ erts_monitor_release_both(mdp);
+ cnt++;
+ }
+ cnt++;
+ }
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_LINK: {
+ ErtsLink *rlnk, *lnk = (ErtsLink *) sig;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ rlnk = erts_link_tree_insert_addr_replace(&ERTS_P_LINKS(c_p),
+ lnk);
+ if (!rlnk) {
+ if (tracing.procs)
+ getting_linked(c_p, lnk->other.item);
+ }
+ else {
+ if (rlnk->type != ERTS_LNK_TYPE_DIST_PROC)
+ erts_link_release(rlnk);
+ else {
+ ErtsLinkData *ldp;
+ ErtsLink *dlnk = erts_link_to_other(rlnk, &ldp);
+ if (erts_link_dist_delete(dlnk))
+ erts_link_release_both(ldp);
+ else
+ erts_link_release(rlnk);
+ }
+ }
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_UNLINK: {
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+ ErtsLinkData *ldp;
+ ErtsLink *llnk;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
+ ErtsSigDistLinkOp *sdlnk = (ErtsSigDistLinkOp *) sig;
+ ASSERT(type == ERTS_SIG_Q_TYPE_DIST_LINK);
+ ASSERT(is_external_pid(sdlnk->remote));
+ llnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p), sdlnk->remote);
+ if (llnk) {
+ ErtsLink *dlnk = erts_link_to_other(llnk, &ldp);
+ erts_link_tree_delete(&ERTS_P_LINKS(c_p), llnk);
+ if (erts_link_dist_delete(dlnk))
+ erts_link_release_both(ldp);
+ else
+ erts_link_release(llnk);
+ cnt += 8;
+ if (tracing.procs)
+ getting_unlinked(c_p, sdlnk->remote);
+ }
+ destroy_sig_dist_link_op(sdlnk);
+ cnt++;
+ }
+ else {
+ ErtsLinkData *ldp;
+ ErtsLink *dlnk, *slnk;
+ slnk = (ErtsLink *) sig;
+ llnk = erts_link_to_other(slnk, &ldp);
+ dlnk = erts_link_tree_key_delete(&ERTS_P_LINKS(c_p), llnk);
+ if (!dlnk)
+ erts_link_release(slnk);
+ else {
+ if (tracing.procs)
+ getting_unlinked(c_p, llnk->other.item);
+ if (dlnk == llnk)
+ erts_link_release_both(ldp);
+ else {
+ erts_link_release(slnk);
+ erts_link_release(dlnk);
+ }
+ }
+ cnt += 2;
+ }
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_GROUP_LEADER: {
+ ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ handle_group_leader(c_p, sgl);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_IS_ALIVE:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ is_alive_response(c_p, sig, !0);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ handle_process_info(c_p, &tracing, sig, next_nm_sig, !0);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ handle_sync_suspend(c_p, sig);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ cnt += handle_rpc(c_p, (ErtsProcSigRPC *) sig, cnt,
+ limit, &yield);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE: {
+ Uint16 type = ERTS_PROC_SIG_TYPE(tag);
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ msg_tracing = handle_trace_change_state(c_p, &tracing,
+ type, sig,
+ next_nm_sig);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+
+ break;
+ }
+
+ default:
+ ERTS_INTERNAL_ERROR("Unknown signal");
+ break;
+ }
+
+ cnt++;
+
+ } while (cnt <= limit || stretch_limit(c_p, &tracing, abs_lim, &limit));
+
+stop: {
+ int deferred_save, deferred_saved_last, res;
+
+ deferred_saved_last = !!(c_p->flags & F_DEFERRED_SAVED_LAST);
+ deferred_save = 0;
+
+ if (!deferred_saved_last)
+ deferred_save = 0;
+ else {
+ if (c_p->sig_qs.saved_last == &c_p->sig_qs.cont) {
+ c_p->sig_qs.saved_last = c_p->sig_qs.last;
+ c_p->flags &= ~F_DEFERRED_SAVED_LAST;
+ deferred_saved_last = deferred_save = 0;
+ }
+ else {
+ if (c_p->sig_qs.save == c_p->sig_qs.last)
+ deferred_save = !0;
+ else
+ deferred_save = 0;
+ }
+ }
+
+ ASSERT(c_p->sig_qs.saved_last != &c_p->sig_qs.cont);
+
+ if (ERTS_UNLIKELY(msg_tracing != 0)) {
+ /*
+             * All messages that have been traced should
+             * be moved to the inner queue. The next signal
+             * in the middle queue should be either the next
+             * message to trace or the next non-message signal.
+ */
+ ASSERT(tracing.messages.next);
+ if (*next_nm_sig) {
+ if (*next_nm_sig == tracing.messages.next)
+ *next_nm_sig = &c_p->sig_qs.cont;
+ if (c_p->sig_qs.nmsigs.last == tracing.messages.next)
+ c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
+ *statep = erts_atomic32_read_nob(&c_p->state);
+ }
+ else {
+ ASSERT(!c_p->sig_qs.nmsigs.next);
+ c_p->sig_qs.nmsigs.last = NULL;
+ state = erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ state &= ~ERTS_PSFLG_SIG_Q;
+ *statep = state;
+ }
+
+ if (tracing.messages.next != &c_p->sig_qs.cont) {
+ *c_p->sig_qs.last = c_p->sig_qs.cont;
+ c_p->sig_qs.last = tracing.messages.next;
+
+ c_p->sig_qs.cont = *tracing.messages.next;
+ if (!c_p->sig_qs.cont)
+ c_p->sig_qs.cont_last = &c_p->sig_qs.cont;
+ *c_p->sig_qs.last = NULL;
+ }
+
+ res = !c_p->sig_qs.cont;
+ }
+ else if (*next_nm_sig) {
+ /*
+             * All messages prior to the next non-message
+             * signal should be moved to the inner queue.
+             * The next non-message signal to handle should
+             * then be first in the middle queue.
+ */
+ ASSERT(**next_nm_sig);
+ if (*next_nm_sig != &c_p->sig_qs.cont) {
+ *c_p->sig_qs.last = c_p->sig_qs.cont;
+ c_p->sig_qs.last = *next_nm_sig;
+
+ c_p->sig_qs.cont = **next_nm_sig;
+ if (c_p->sig_qs.nmsigs.last == *next_nm_sig)
+ c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
+ *next_nm_sig = &c_p->sig_qs.cont;
+ *c_p->sig_qs.last = NULL;
+ }
+
+ ASSERT(c_p->sig_qs.cont);
+
+ *statep = erts_atomic32_read_nob(&c_p->state);
+
+ res = 0;
+ }
+ else {
+ /*
+             * All non-message signals have been handled.
+             * All messages should be moved to the inner
+             * queue; the middle queue should then be empty.
+ */
+ ASSERT(!c_p->sig_qs.nmsigs.next);
+ c_p->sig_qs.nmsigs.last = NULL;
+
+ if (c_p->sig_qs.cont_last != &c_p->sig_qs.cont) {
+ ASSERT(!*c_p->sig_qs.last);
+ *c_p->sig_qs.last = c_p->sig_qs.cont;
+ c_p->sig_qs.last = c_p->sig_qs.cont_last;
+ ASSERT(!*c_p->sig_qs.last);
+
+ c_p->sig_qs.cont_last = &c_p->sig_qs.cont;
+ c_p->sig_qs.cont = NULL;
+ }
+
+ ASSERT(!c_p->sig_qs.cont);
+
+ state = erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ state &= ~ERTS_PSFLG_SIG_Q;
+ *statep = state;
+ res = !0;
+ }
+
+ if (deferred_saved_last
+ && (c_p->sig_qs.saved_last == &c_p->sig_qs.cont)) {
+ c_p->sig_qs.saved_last = c_p->sig_qs.last;
+ c_p->flags &= ~F_DEFERRED_SAVED_LAST;
+ if (deferred_save)
+ c_p->sig_qs.save = c_p->sig_qs.saved_last;
+ }
+ else if (!res) {
+ if (deferred_save) {
+ c_p->sig_qs.save = c_p->sig_qs.last;
+ ASSERT(!PEEK_MESSAGE(c_p));
+ }
+ }
+ else {
+ c_p->flags &= ~F_DEFERRED_SAVED_LAST;
+ if (deferred_save)
+ c_p->sig_qs.save = c_p->sig_qs.saved_last;
+ }
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+
+ *redsp = cnt/4 + 1;
+
+ if (yield) {
+ int vreds = max_reds - *redsp;
+ if (vreds > 0) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ esdp->virtual_reds += vreds;
+ }
+ *redsp = max_reds;
+ }
+
+ return res;
+ }
+}
+
+static int
+stretch_limit(Process *c_p, ErtsSigRecvTracing *tp,
+ int abs_lim, int *limp)
+{
+ int lim;
+ /*
+ * Stretch limit up to a maximum of 'abs_lim' if
+ * there currently are no messages available to
+ * inspect by 'receive' and it might be possible
+ * to get messages available by processing
+ * signals (or trace messages).
+ */
+
+ lim = *limp;
+ ASSERT(abs_lim >= lim);
+ if (abs_lim == lim)
+ return 0;
+
+ if (!(c_p->flags & F_DEFERRED_SAVED_LAST)) {
+ ErtsSignal *sig;
+
+ if (PEEK_MESSAGE(c_p))
+ return 0;
+ sig = (ErtsSignal *) c_p->sig_qs.cont;
+ if (!sig)
+ return 0; /* No signals to process available... */
+ if (ERTS_SIG_IS_MSG(sig) && tp->messages.next != &c_p->sig_qs.cont)
+ return 0;
+ }
+
+ lim += ERTS_SIG_REDS_CNT_FACTOR*100;
+ if (lim > abs_lim)
+ lim = abs_lim;
+ *limp = lim;
+ return !0;
+}
+
+
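+/*
+ * Called repeatedly by an exiting process in order to dispose of the
+ * remaining non-message signals in its middle queue. Returns a
+ * non-zero value when all of them have been handled, and zero when
+ * the reduction budget ran out while signals remain.
+ */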
+int
+erts_proc_sig_handle_exit(Process *c_p, int *redsp)
+{
+ int cnt, limit;
+ ErtsMessage *sig, ***next_nm_sig;
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(!(ERTS_PSFLG_SIG_IN_Q & erts_atomic32_read_nob(&c_p->state)));
+
+ limit = *redsp;
+ limit *= ERTS_SIG_REDS_CNT_FACTOR;
+
+ *redsp = 1;
+
+ next_nm_sig = &c_p->sig_qs.nmsigs.next;
+
+ if (!*next_nm_sig) {
+ ASSERT(!c_p->sig_qs.nmsigs.last);
+ return !0; /* done... */
+ }
+
+ cnt = 0;
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, NULL, next_nm_sig);
+
+ do {
+ Eterm tag;
+ Uint16 type;
+ int op;
+
+ sig = **next_nm_sig;
+
+ ASSERT(sig);
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ tag = ((ErtsSignal *) sig)->common.tag;
+ type = ERTS_PROC_SIG_TYPE(tag);
+ op = ERTS_PROC_SIG_OP(tag);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, NULL, next_nm_sig);
+
+ cnt++;
+
+ switch (op) {
+
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ switch (type) {
+ case ERTS_SIG_Q_TYPE_GEN_EXIT:
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ case ERTS_LNK_TYPE_PROC:
+ case ERTS_LNK_TYPE_DIST_PROC:
+ erts_link_release((ErtsLink *) sig);
+ break;
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_NODE:
+ case ERTS_MON_TYPE_NODES:
+ case ERTS_MON_TYPE_SUSPEND:
+ erts_monitor_release((ErtsMonitor *) sig);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected sig type");
+ break;
+ }
+ break;
+
+ case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
+ sig->next = NULL;
+ erts_cleanup_messages(sig);
+ break;
+
+ case ERTS_SIG_Q_OP_MONITOR: {
+ ErtsProcExitContext pectxt = {c_p, am_noproc};
+ erts_proc_exit_handle_monitor((ErtsMonitor *) sig,
+ (void *) &pectxt);
+ cnt += 4;
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_DEMONITOR:
+ if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR)
+ destroy_dist_proc_demonitor((ErtsSigDistProcDemonitor *) sig);
+ else
+ erts_monitor_release((ErtsMonitor *) sig);
+ break;
+
+ case ERTS_SIG_Q_OP_LINK: {
+ ErtsProcExitContext pectxt = {c_p, am_noproc};
+ erts_proc_exit_handle_link((ErtsLink *) sig, (void *) &pectxt);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_UNLINK:
+ if (type == ERTS_SIG_Q_TYPE_DIST_LINK)
+ destroy_sig_dist_link_op((ErtsSigDistLinkOp *) sig);
+ else
+ erts_link_release((ErtsLink *) sig);
+ break;
+
+ case ERTS_SIG_Q_OP_GROUP_LEADER: {
+ ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
+ handle_group_leader(c_p, sgl);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_IS_ALIVE:
+ is_alive_response(c_p, sig, 0);
+ break;
+
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
+ handle_process_info(c_p, NULL, sig, next_nm_sig, 0);
+ break;
+
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ handle_sync_suspend(c_p, sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC: {
+ int yield = 0;
+ handle_rpc(c_p, (ErtsProcSigRPC *) sig,
+ cnt, limit, &yield);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ destroy_trace_info((ErtsSigTraceInfo *) sig);
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unknown signal");
+ break;
+ }
+
+    } while (cnt <= limit && *next_nm_sig);
+
+ *redsp += cnt / ERTS_SIG_REDS_CNT_FACTOR;
+
+ if (*next_nm_sig)
+ return 0;
+
+ ASSERT(!c_p->sig_qs.nmsigs.next);
+ c_p->sig_qs.nmsigs.last = NULL;
+ (void) erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ return !0;
+}
+
+#ifdef USE_VM_PROBES
+# define ERTS_CLEAR_SEQ_TOKEN(MP) \
+ ERL_MESSAGE_TOKEN((MP)) = ((ERL_MESSAGE_DT_UTAG((MP)) != NIL) \
+ ? am_have_dt_utag : NIL)
+#else
+# define ERTS_CLEAR_SEQ_TOKEN(MP) \
+ ERL_MESSAGE_TOKEN((MP)) = NIL
+#endif
+
+static ERTS_INLINE void
+clear_seq_trace_token(ErtsMessage *sig)
+{
+ if (ERTS_SIG_IS_MSG((ErtsSignal *) sig))
+ ERTS_CLEAR_SEQ_TOKEN(sig);
+ else {
+ Uint tag;
+ Uint16 op, type;
+
+ tag = ((ErtsSignal *) sig)->common.tag;
+ type = ERTS_PROC_SIG_TYPE(tag);
+ op = ERTS_PROC_SIG_OP(tag);
+
+ switch (op) {
+
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ switch (type) {
+ case ERTS_SIG_Q_TYPE_GEN_EXIT:
+ ERTS_CLEAR_SEQ_TOKEN(sig);
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ case ERTS_LNK_TYPE_PROC:
+ case ERTS_LNK_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_NODE:
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected sig type");
+ break;
+ }
+ break;
+
+ case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
+ ERTS_CLEAR_SEQ_TOKEN(sig);
+ break;
+
+ case ERTS_SIG_Q_OP_MONITOR:
+ case ERTS_SIG_Q_OP_DEMONITOR:
+ case ERTS_SIG_Q_OP_LINK:
+ case ERTS_SIG_Q_OP_UNLINK:
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unknown signal");
+ break;
+ }
+ }
+}
+
+void
+erts_proc_sig_clear_seq_trace_tokens(Process *c_p)
+{
+ ASSERT(erts_thr_progress_is_blocking());
+ erts_proc_sig_fetch(c_p);
+ ERTS_FOREACH_SIG_PRIVQS(c_p, sig, clear_seq_trace_token(sig));
+}
+
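+/* Estimate the memory footprint, in bytes, of a non-message signal. */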
+Uint
+erts_proc_sig_signal_size(ErtsSignal *sig)
+{
+ Eterm tag;
+ Uint16 type;
+ int op;
+ Uint size = 0;
+
+ ASSERT(sig);
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ tag = sig->common.tag;
+ type = ERTS_PROC_SIG_TYPE(tag);
+ op = ERTS_PROC_SIG_OP(tag);
+
+ switch (op) {
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ switch (type) {
+ case ERTS_SIG_Q_TYPE_GEN_EXIT:
+ size = ((ErtsMessage *) sig)->hfrag.alloc_size;
+ size *= sizeof(Eterm);
+ size += sizeof(ErtsMessage) - sizeof(Eterm);
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ case ERTS_LNK_TYPE_PROC:
+ case ERTS_LNK_TYPE_DIST_PROC:
+ size = erts_link_size((ErtsLink *) sig);
+ break;
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_NODE:
+            size = erts_monitor_size((ErtsMonitor *) sig);
+            break;
+        default:
+ ERTS_INTERNAL_ERROR("Unexpected sig type");
+ break;
+ }
+ break;
+
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
+ case ERTS_SIG_Q_OP_IS_ALIVE:
+ size = ((ErtsMessage *) sig)->hfrag.alloc_size;
+ size *= sizeof(Eterm);
+ size += sizeof(ErtsMessage) - sizeof(Eterm);
+ break;
+
+ case ERTS_SIG_Q_OP_DEMONITOR:
+ if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
+ size = NC_HEAP_SIZE(((ErtsSigDistProcDemonitor *) sig)->ref);
+ size--;
+ size *= sizeof(Eterm);
+ size += sizeof(ErtsSigDistProcDemonitor);
+ break;
+ }
+ /* Fall through... */
+
+ case ERTS_SIG_Q_OP_MONITOR:
+ size = erts_monitor_size((ErtsMonitor *) sig);
+ break;
+
+ case ERTS_SIG_Q_OP_UNLINK:
+ if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
+ size = NC_HEAP_SIZE(((ErtsSigDistLinkOp *) sig)->remote);
+ size--;
+ size *= sizeof(Eterm);
+ size += sizeof(ErtsSigDistLinkOp);
+ break;
+ }
+ /* Fall through... */
+
+ case ERTS_SIG_Q_OP_LINK:
+ size = erts_link_size((ErtsLink *) sig);
+ break;
+
+ case ERTS_SIG_Q_OP_GROUP_LEADER: {
+ ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
+ size = size_object(sgl->group_leader);
+ size += size_object(sgl->ref);
+ size *= sizeof(Eterm);
+ size += sizeof(ErtsSigGroupLeader) - sizeof(Eterm);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ size = sizeof(ErtsSigTraceInfo);
+ break;
+
+ case ERTS_SIG_Q_OP_PROCESS_INFO: {
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+ size = sizeof(ErtsProcessInfoSig);
+ size += (pisig->len - 1) * sizeof(int);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_RPC:
+ size = sizeof(ErtsProcSigRPC);
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unknown signal");
+ break;
+ }
+
+ return size;
+}
+
+int
+erts_proc_sig_receive_helper(Process *c_p,
+ int fcalls,
+ int neg_o_reds,
+ ErtsMessage **msgpp,
+ int *get_outp)
+{
+ ErtsMessage *msgp;
+ int reds, consumed_reds, left_reds, max_reds;
+
+    /*
+     * Called from the loop-rec instruction when a receive
+     * has reached the end of the inner (private) queue. This
+     * function tries to make more messages available in the
+     * inner queue for the receive to handle. It does this by
+     * processing the middle (private) queue and/or by moving
+     * signals from the outer (public) queue into the middle
+     * queue.
+     *
+     * If this function succeeds in making more messages
+     * available in the inner queue, *msgpp points to the next
+     * message. *msgpp is set to NULL when:
+     * -- the process became exiting. *get_outp is set to a
+     *    value greater than zero.
+     * -- the process needs to yield. *get_outp is set to a
+     *    value less than zero.
+     * -- no more messages exist in any of the queues.
+     *    *get_outp is set to zero and the message queue
+     *    lock remains locked. This is so that the process
+     *    can make its way to the appropriate wait instruction
+     *    without racing with new incoming messages.
+     */
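+    /*
+     * Illustrative caller-side dispatch (a sketch only; the actual
+     * caller is the loop-rec instruction):
+     *
+     *   reds = erts_proc_sig_receive_helper(c_p, fcalls, neg_o_reds,
+     *                                       &msgp, &get_out);
+     *   if (msgp)             -> inspect next message
+     *   else if (get_out > 0) -> process is exiting; schedule out
+     *   else if (get_out < 0) -> out of reductions; yield
+     *   else                  -> no messages; msgq lock held; wait
+     */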
+
+ ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ ASSERT(!*msgpp);
+
+ left_reds = fcalls - neg_o_reds;
+ consumed_reds = 0;
+
+ while (!0) {
+ erts_aint32_t state;
+
+ if (!c_p->sig_qs.cont) {
+
+ consumed_reds += 4;
+ left_reds -= 4;
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ /*
+ * Messages may have been moved directly to
+ * inner queue...
+ */
+ msgp = PEEK_MESSAGE(c_p);
+ if (msgp) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ *get_outp = 0;
+ *msgpp = msgp;
+ return consumed_reds;
+ }
+
+ if (!c_p->sig_qs.cont) {
+ /*
+ * No messages! Return with message queue
+                 * locked and let the process continue
+                 * to the wait instruction...
+ */
+ *get_outp = 0;
+ *msgpp = NULL;
+
+ return consumed_reds;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ if (left_reds <= 0)
+ break; /* Yield */
+
+ /* handle newly arrived signals... */
+ }
+
+ reds = ERTS_SIG_HANDLE_REDS_MAX_PREFERED;
+#ifdef DEBUG
+ /* test that it works also with very few reds */
+ max_reds = left_reds;
+ if (reds > left_reds)
+ reds = left_reds;
+#else
+ /* At least work preferred amount of reds... */
+ max_reds = left_reds;
+ if (max_reds < reds)
+ max_reds = reds;
+#endif
+ (void) erts_proc_sig_handle_incoming(c_p, &state, &reds,
+ max_reds, !0);
+ consumed_reds += reds;
+ left_reds -= reds;
+
+ /* we may have exited or suspended by an incoming signal... */
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_SUSPENDED)) {
+ if (state & ERTS_PSFLG_SUSPENDED)
+ break; /* Yield */
+
+ /*
+             * The process needs to schedule out in order
+             * to terminate. Prepare for this a bit...
+ */
+ ASSERT(state & ERTS_PSFLG_EXITING);
+ ASSERT(c_p->flags & F_DELAY_GC);
+
+ c_p->flags &= ~F_DELAY_GC;
+ c_p->arity = 0;
+ c_p->current = NULL;
+
+ *get_outp = 1;
+ *msgpp = NULL;
+
+ return consumed_reds;
+ }
+
+ msgp = PEEK_MESSAGE(c_p);
+ if (msgp) {
+ *get_outp = 0;
+ *msgpp = msgp;
+ return consumed_reds;
+ }
+
+ if (left_reds <= 0)
+ break; /* yield */
+
+ ASSERT(!c_p->sig_qs.cont);
+ /* Go fetch again... */
+ }
+
+ /* Yield... */
+
+ *get_outp = -1;
+ *msgpp = NULL;
+
+ ASSERT(consumed_reds >= (fcalls - neg_o_reds));
+ return consumed_reds;
+}
+
+static int
+handle_trace_change_state(Process *c_p,
+ ErtsSigRecvTracing *tracing,
+ Uint16 type,
+ ErtsMessage *sig,
+ ErtsMessage ***next_nm_sig)
+{
+ ErtsSigTraceInfo *trace_info = (ErtsSigTraceInfo *) sig;
+ ErtsMessage **next = *next_nm_sig;
+ int msgs_active, old_msgs_active = !!tracing->messages.active;
+
+ ASSERT(sig == *next);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ ERTS_TRACE_FLAGS(c_p) |= trace_info->flags_on;
+ ERTS_TRACE_FLAGS(c_p) &= ~trace_info->flags_off;
+ if (is_value(trace_info->tracer))
+ erts_tracer_replace(&c_p->common, trace_info->tracer);
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ destroy_trace_info(trace_info);
+ /*
+ * Adjust tracing state according to modifications made by
+ * the trace info signal...
+ */
+ adjust_tracing_state(c_p, tracing, 0);
+ msgs_active = !!tracing->messages.active;
+
+ if (old_msgs_active ^ msgs_active) {
+ if (msgs_active) {
+ ASSERT(!tracing->messages.next);
+ tracing->messages.next = next;
+ }
+ else {
+ ASSERT(tracing->messages.next);
+ tracing->messages.next = NULL;
+ }
+ }
+
+ ASSERT(!msgs_active || tracing->messages.next);
+
+ return msgs_active;
+}
+
+static void
+getting_unlinked(Process *c_p, Eterm unlinker)
+{
+ trace_proc(c_p, ERTS_PROC_LOCK_MAIN, c_p,
+ am_getting_unlinked, unlinker);
+}
+
+static void
+getting_linked(Process *c_p, Eterm linker)
+{
+ trace_proc(c_p, ERTS_PROC_LOCK_MAIN, c_p,
+ am_getting_linked, linker);
+}
+
+static ERTS_INLINE void
+handle_message_enqueued_tracing(Process *c_p,
+ ErtsSigRecvTracing *tracing,
+ ErtsMessage *msg)
+{
+ ASSERT(ERTS_SIG_IS_INTERNAL_MSG(msg));
+
+#if defined(USE_VM_PROBES)
+ if (tracing->messages.vm_probes && DTRACE_ENABLED(message_queued)) {
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Sint len = erts_proc_sig_privqs_len(c_p);
+ Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
+
+ if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(seq_trace_token);
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ }
+ /* Message intentionally not passed... */
+ DTRACE6(message_queued,
+ tracing->messages.receiver_name,
+ size_object(ERL_MESSAGE_TERM(msg)),
+                len, /* This is NOT the message queue len, but it's something... */
+ tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+
+ if (tracing->messages.receive_trace && tracing->messages.event->on) {
+ ASSERT(IS_TRACED(c_p));
+ trace_receive(c_p,
+ ERL_MESSAGE_FROM(msg),
+ ERL_MESSAGE_TERM(msg),
+ tracing->messages.event);
+ }
+}
+
+static int
+handle_msg_tracing(Process *c_p, ErtsSigRecvTracing *tracing,
+ ErtsMessage ***next_nm_sig)
+{
+ ErtsMessage **next_sig, *sig;
+ int cnt = 0, limit = ERTS_PROC_SIG_TRACE_COUNT_LIMIT;
+
+ ASSERT(tracing->messages.next);
+ next_sig = tracing->messages.next;
+ sig = *next_sig;
+
+ /*
+     * Receive tracing is active. Handle all messages
+     * until the next non-message signal...
+ */
+
+ while (sig && ERTS_SIG_IS_MSG(sig)) {
+ if (cnt > limit) {
+ tracing->messages.next = next_sig;
+ return -1; /* Yield... */
+ }
+ if (ERTS_SIG_IS_EXTERNAL_MSG(sig)) {
+ cnt++;
+ if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN,
+ sig, 0)) {
+ /* Bad dist message; remove it... */
+ remove_mq_m_sig(c_p, sig, next_sig, next_nm_sig);
+ sig = *next_sig;
+ continue;
+ }
+ }
+ handle_message_enqueued_tracing(c_p, tracing, sig);
+ cnt++;
+
+ next_sig = &(*next_sig)->next;
+ sig = *next_sig;
+ }
+
+ tracing->messages.next = next_sig;
+
+ if (!sig) {
+ ASSERT(!*next_nm_sig);
+ return 1; /* end... */
+ }
+
+ ASSERT(*next_nm_sig);
+ ASSERT(**next_nm_sig == sig);
+
+    /* The next signal is a non-message signal... */
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ /*
+ * Return and handle the non-message signal...
+ */
+
+ return 0;
+}
+
+Uint
+erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ int info_on_self,
+ ErtsMessageInfo *mip)
+{
+ Uint tot_heap_size;
+ ErtsMessage *mp, **mpp;
+ Sint i;
+ int self_on_heap;
+
+ /*
+ * Prepare the message queue (inner signal queue)
+ * for inspection by process_info().
+ *
+     * - Decode all messages that are on external format
+     * - Remove all corrupt dist messages from the queue
+     * - Save a pointer to, and the heap size needed for,
+     *   each message in the mip array.
+     * - Return the total heap size needed for all messages
+     *   that need to be copied.
+ *
+ */
+
+ ASSERT(!info_on_self || c_p == rp);
+
+ self_on_heap = info_on_self && !(c_p->flags & F_OFF_HEAP_MSGQ);
+
+ tot_heap_size = 0;
+ i = 0;
+ mpp = &rp->sig_qs.first;
+ mp = rp->sig_qs.first;
+ while (mp) {
+ Eterm msg = ERL_MESSAGE_TERM(mp);
+
+ mip[i].size = 0;
+
+ if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
+ /* decode it... */
+ if (mp->data.attached)
+ erts_decode_dist_message(rp, rp_locks, mp, !0);
+
+ msg = ERL_MESSAGE_TERM(mp);
+
+ if (is_non_value(msg)) {
+ ErtsMessage *bad_mp = mp;
+ /*
+ * Bad distribution message; remove
+ * it from the queue...
+ */
+ ASSERT(!mp->data.attached);
+
+ ASSERT(*mpp == bad_mp);
+
+ remove_iq_m_sig(rp, mp, mpp);
+
+ mp = *mpp;
+
+ bad_mp->next = NULL;
+ erts_cleanup_messages(bad_mp);
+ continue;
+ }
+ }
+
+ ASSERT(is_value(msg));
+
+ if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
+ Uint sz = size_object(msg);
+ mip[i].size = sz;
+ tot_heap_size += sz;
+ }
+
+ mip[i].msgp = mp;
+ i++;
+ mpp = &mp->next;
+ mp = mp->next;
+ }
+
+    ASSERT(rp->sig_qs.len == i);
+
+ return tot_heap_size;
+}
+
+static ERTS_INLINE void
+move_msg_to_heap(Process *c_p, ErtsMessage *mp)
+{
+ /*
+ * We leave not-yet-decoded distribution messages
+ * as they are in the queue, since it is not
+ * possible to determine their maximum size until
+ * they have actually been decoded...
+ *
+ * We also leave combined messages as they are...
+ */
+ if (ERTS_SIG_IS_INTERNAL_MSG(mp)
+ && mp->data.attached
+ && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
+ ErlHeapFragment *bp;
+
+ bp = erts_message_to_heap_frag(mp);
+
+ if (bp->next)
+ erts_move_multi_frags(&c_p->htop, &c_p->off_heap, bp,
+ mp->m, ERL_MESSAGE_REF_ARRAY_SZ, 0);
+ else
+ erts_copy_one_frag(&c_p->htop, &c_p->off_heap, bp,
+ mp->m, ERL_MESSAGE_REF_ARRAY_SZ);
+
+ mp->data.heap_frag = NULL;
+ free_message_buffer(bp);
+ }
+}
+
+void
+erts_proc_sig_move_msgs_to_heap(Process *c_p)
+{
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+
+ ERTS_FOREACH_SIG_PRIVQS(c_p, sig, move_msg_to_heap(c_p, sig));
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+}
+
+
+BIF_RETTYPE
+erts_internal_dirty_process_handle_signals_1(BIF_ALIST_1)
+{
+ erts_aint32_t state, dirty, noproc;
+ int busy;
+ Process *rp;
+
+ if (BIF_P != erts_dirty_process_signal_handler
+ && BIF_P != erts_dirty_process_signal_handler_high
+ && BIF_P != erts_dirty_process_signal_handler_max)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_false);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
+ if (!rp)
+ BIF_RET(am_noproc);
+
+ state = erts_atomic32_read_nob(&rp->state);
+ dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (!dirty)
+ BIF_RET(am_normal);
+
+ busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
+
+ state = erts_atomic32_read_mb(&rp->state);
+ noproc = (state & ERTS_PSFLG_FREE);
+ dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+
+ if (busy) {
+ if (noproc)
+ BIF_RET(am_noproc);
+ if (dirty)
+ BIF_RET(am_more); /* try again... */
+ BIF_RET(am_normal); /* will handle signals itself... */
+ }
+ else {
+ erts_aint32_t state;
+ int done;
+ Eterm res = am_false;
+ int reds = 0;
+
+ if (noproc)
+ res = am_noproc;
+ else if (!dirty)
+ res = am_normal; /* will handle signals itself... */
+ else {
+ reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ done = erts_proc_sig_handle_incoming(rp, &state, &reds,
+ reds, 0);
+ if (done || (state & ERTS_PSFLG_EXITING))
+ res = am_ok;
+ else
+ res = am_more;
+ }
+
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ if (reds)
+ BUMP_REDS(BIF_P, reds);
+
+ BIF_RET(res);
+ }
+}
+
+static void
+debug_foreach_sig_heap_frags(ErlHeapFragment *hfrag,
+ void (*oh_func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ ErlHeapFragment *hf = hfrag;
+ while (hf) {
+ oh_func(&(hf->off_heap), arg);
+ hf = hf->next;
+ }
+}
+
+static void
+debug_foreach_sig_fake_oh(Eterm term,
+ void (*oh_func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ if (is_external(term)) {
+ ErlOffHeap oh;
+ oh.overhead = 0;
+ oh.first = ((struct erl_off_heap_header *)
+ (char *) external_thing_ptr(term));
+ ASSERT(!oh.first->next);
+ oh_func(&oh, arg);
+ }
+
+}
+
+void
+erts_proc_sig_debug_foreach_sig(Process *c_p,
+ void (*msg_func)(ErtsMessage *, void *),
+ void (*oh_func)(ErlOffHeap *, void *),
+ void (*mon_func)(ErtsMonitor *, void *),
+ void (*lnk_func)(ErtsLink *, void *),
+ void *arg)
+{
+ ErtsMessage *queue[] = {c_p->sig_qs.first, c_p->sig_qs.cont, c_p->sig_inq.first};
+ int qix;
+
+ for (qix = 0; qix < sizeof(queue)/sizeof(queue[0]); qix++) {
+ ErtsMessage *sig;
+ for (sig = queue[qix]; sig; sig = sig->next) {
+
+ if (ERTS_SIG_IS_MSG(sig))
+ msg_func(sig, arg);
+ else {
+ Eterm tag;
+ Uint16 type;
+ int op;
+
+ ASSERT(sig);
+ ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ tag = ((ErtsSignal *) sig)->common.tag;
+ type = ERTS_PROC_SIG_TYPE(tag);
+ op = ERTS_PROC_SIG_OP(tag);
+
+ switch (op) {
+
+ case ERTS_SIG_Q_OP_EXIT:
+ case ERTS_SIG_Q_OP_EXIT_LINKED:
+ case ERTS_SIG_Q_OP_MONITOR_DOWN:
+ switch (type) {
+ case ERTS_SIG_Q_TYPE_GEN_EXIT:
+ debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ case ERTS_LNK_TYPE_PROC:
+ case ERTS_LNK_TYPE_DIST_PROC:
+ lnk_func((ErtsLink *) sig, arg);
+ break;
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_NODE:
+ mon_func((ErtsMonitor *) sig, arg);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected sig type");
+ break;
+ }
+ break;
+
+ case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
+ debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
+ break;
+
+ case ERTS_SIG_Q_OP_DEMONITOR:
+ if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
+ debug_foreach_sig_fake_oh(((ErtsSigDistProcDemonitor *) sig)->ref,
+ oh_func, arg);
+ break;
+ }
+ /* Fall through... */
+
+ case ERTS_SIG_Q_OP_MONITOR:
+ mon_func((ErtsMonitor *) sig, arg);
+ break;
+
+ case ERTS_SIG_Q_OP_UNLINK:
+ if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
+ debug_foreach_sig_fake_oh(((ErtsSigDistLinkOp *) sig)->remote,
+ oh_func, arg);
+ break;
+ }
+ /* Fall through... */
+
+ case ERTS_SIG_Q_OP_LINK:
+ lnk_func((ErtsLink *) sig, arg);
+ break;
+
+ case ERTS_SIG_Q_OP_GROUP_LEADER: {
+ ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
+ oh_func(&sgl->oh, arg);
+ break;
+ }
+
+ case ERTS_SIG_Q_OP_IS_ALIVE:
+ case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unknown signal");
+ break;
+ }
+
+ }
+ }
+ }
+}
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+
+static void
+chk_eterm(Process *c_p, int privq, ErtsMessage *mp, Eterm term)
+{
+ ErlHeapFragment *bp;
+ Eterm *ptr = NULL;
+
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_IMMED1:
+ return;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(term);
+ ERTS_ASSERT(!is_header(CAR(ptr)));
+ ERTS_ASSERT(!is_header(CDR(ptr)));
+ break;
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(term);
+ ERTS_ASSERT(is_header(*ptr));
+ break;
+ case TAG_PRIMARY_HEADER:
+ default:
+ ERTS_INTERNAL_ERROR("Not valid term");
+ break;
+ }
+
+ if (erts_is_literal(term, ptr))
+ return;
+
+ for (bp = erts_message_to_heap_frag(mp); bp; bp = bp->next) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->used_size)
+ return;
+ }
+
+ ASSERT(erts_dbg_within_proc(ptr, c_p, NULL));
+}
+
+static Sint
+proc_sig_hdbg_check_queue(Process *proc,
+ int privq,
+ ErtsMessage **sig_next,
+ ErtsMessage **sig_last,
+ ErtsMessage **sig_nm_next,
+ ErtsMessage **sig_nm_last,
+ ErtsSigRecvTracing *tracing,
+ int *found_saved_last_p,
+ erts_aint32_t sig_psflg)
+{
+ ErtsMessage **next, *sig, **nm_next, **nm_last;
+ int last_nm_sig_found, nm_sigs = 0, found_next_trace = 0,
+ found_save = 0, last_sig_found = 0, found_saved_last = 0;
+ Sint msg_len = 0;
+ ErtsMessage **next_trace = tracing ? tracing->messages.next : NULL;
+ ErtsMessage **save = proc->sig_qs.save;
+ ErtsMessage **saved_last = proc->sig_qs.saved_last;
+
+ if (!privq) {
+ ErtsSignal *sig = (ErtsSignal *) *sig_next;
+ if (sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
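+ /* Nothing to verify here; the offset marker is re-checked
+ * against each non-message signal in the main loop below... */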
+
+ }
+ }
+
+ nm_next = sig_nm_next;
+ nm_last = sig_nm_last;
+ next = sig_next;
+ sig = *sig_next;
+
+ last_nm_sig_found = !nm_last;
+ if (last_nm_sig_found)
+ ERTS_ASSERT(!nm_next);
+ else
+ ERTS_ASSERT(nm_next);
+
+ while (1) {
+ ErtsSignal *nm_sig;
+
+ if (next == sig_last) {
+ ASSERT(!*next);
+ last_sig_found = 1;
+ }
+
+ if (next == save)
+ found_save = 1;
+
+ if (next == saved_last)
+ found_saved_last = 1;
+
+ if (next == next_trace) {
+ found_next_trace = 1;
+ ERTS_ASSERT(nm_sigs == 0);
+ }
+
+ while (sig && ERTS_SIG_IS_MSG(sig)) {
+ int i;
+ if (ERTS_SIG_IS_EXTERNAL_MSG(sig))
+ i = 1;
+ else
+ i = 0;
+ for (; i < ERL_MESSAGE_REF_ARRAY_SZ; i++)
+ chk_eterm(proc, privq, sig, sig->m[i]);
+
+ msg_len++;
+ next = &sig->next;
+ sig = sig->next;
+
+ if (next == sig_last) {
+ ASSERT(!*next);
+ last_sig_found = 1;
+ }
+
+ if (next == save)
+ found_save = 1;
+
+ if (next == saved_last)
+ found_saved_last = 1;
+
+ if (next == next_trace) {
+ found_next_trace = 1;
+ ERTS_ASSERT(nm_sigs == 0);
+ }
+ }
+
+ if (!sig)
+ break;
+
+ nm_sig = (ErtsSignal *) sig;
+
+ if (nm_sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
+ ERTS_ASSERT(!privq);
+ ERTS_ASSERT(sig == *sig_next);
+ }
+ else {
+ nm_sigs++;
+
+ ERTS_ASSERT(!last_nm_sig_found);
+ ERTS_ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ ERTS_ASSERT(nm_next == next);
+
+ if (nm_last == next) {
+ ASSERT(!nm_sig->common.specific.next);
+ last_nm_sig_found = 1;
+ }
+
+ nm_next = nm_sig->common.specific.next;
+
+ }
+
+ next = &nm_sig->common.next;
+ sig = nm_sig->common.next;
+
+ }
+
+ if (!privq) {
+ /* outer queue */
+ ERTS_ASSERT(!found_save);
+ ERTS_ASSERT(!found_saved_last);
+ }
+ else if (privq > 0) {
+ /* middle queue */
+ ERTS_ASSERT(!next_trace || found_next_trace);
+ ERTS_ASSERT(!found_save);
+ if (!found_saved_last_p) {
+ ERTS_ASSERT(!found_saved_last
+ || (proc->flags & F_DEFERRED_SAVED_LAST));
+ }
+ else {
+ if (*found_saved_last_p) {
+ ERTS_ASSERT(!found_saved_last);
+ ERTS_ASSERT(!(proc->flags & F_DEFERRED_SAVED_LAST));
+ }
+ else if (saved_last) {
+ ERTS_ASSERT(found_saved_last);
+ ERTS_ASSERT(proc->flags & F_DEFERRED_SAVED_LAST);
+ }
+ *found_saved_last_p |= found_saved_last;
+ }
+ }
+ else {
+ /* inner queue */
+ ERTS_ASSERT(!found_next_trace);
+ ERTS_ASSERT(nm_sigs == 0);
+ ERTS_ASSERT(found_save);
+ ERTS_ASSERT(!saved_last
+ || (found_saved_last
+ || (proc->flags & F_DEFERRED_SAVED_LAST)));
+ if (found_saved_last_p)
+ *found_saved_last_p |= found_saved_last;
+ }
+
+ ERTS_ASSERT(last_nm_sig_found);
+ ERTS_ASSERT(last_sig_found);
+
+ if (sig_psflg != ERTS_PSFLG_FREE) {
+ erts_aint32_t state = erts_atomic32_read_nob(&proc->state);
+ ERTS_ASSERT(nm_sigs ? !!(state & sig_psflg) : !(state & sig_psflg));
+ }
+
+ return msg_len;
+}
+
+void
+erts_proc_sig_hdbg_check_priv_queue(Process *p, int qlock, char *what, char *file, int line)
+{
+ int found_saved_last = 0;
+ Sint len, len1, len2;
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
+ || ERTS_PROC_IS_EXITING(p)
+ || (ERTS_PROC_LOCK_MAIN
+ & erts_proc_lc_my_proc_locks(p)));
+ len1 = proc_sig_hdbg_check_queue(p,
+ -1,
+ &p->sig_qs.first,
+ p->sig_qs.last,
+ NULL,
+ NULL,
+ NULL,
+ &found_saved_last,
+ ERTS_PSFLG_FREE);
+ len2 = proc_sig_hdbg_check_queue(p,
+ 1,
+ &p->sig_qs.cont,
+ p->sig_qs.cont_last,
+ p->sig_qs.nmsigs.next,
+ p->sig_qs.nmsigs.last,
+ NULL,
+ &found_saved_last,
+ ERTS_PSFLG_SIG_Q);
+ if (p->sig_qs.saved_last)
+ ERTS_ASSERT(found_saved_last);
+ len = proc_sig_privqs_len(p, qlock);
+ ERTS_ASSERT(len == len1 + len2);
+}
+
+void
+erts_proc_sig_hdbg_check_in_queue(Process *p, char *what, char *file, int line)
+{
+ Sint len;
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
+ || ERTS_PROC_IS_EXITING(p)
+ || (ERTS_PROC_LOCK_MSGQ
+ & erts_proc_lc_my_proc_locks(p)));
+ len = proc_sig_hdbg_check_queue(p,
+ 0,
+ &p->sig_inq.first,
+ p->sig_inq.last,
+ p->sig_inq.nmsigs.next,
+ p->sig_inq.nmsigs.last,
+ NULL,
+ NULL,
+ ERTS_PSFLG_SIG_IN_Q);
+ ASSERT(p->sig_inq.len == len); (void)len;
+}
+
+#endif /* ERTS_PROC_SIG_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
new file mode 100644
index 0000000000..3fc2d06b2d
--- /dev/null
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -0,0 +1,1051 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2018. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process signal queue implementation.
+ *
+ * Currently the following signals are handled:
+ * - Messages
+ * - Exit
+ * - Monitor
+ * - Demonitor
+ * - Monitor down
+ * - Persistent monitor message
+ * - Link
+ * - Unlink
+ * - Group leader
+ * - Is process alive
+ * - Process info request
+ * - Suspend request (monitor of suspend type)
+ * - Resume request (demonitor of suspend type)
+ * - Suspend cleanup (monitor down of suspend type)
+ * - Sync suspend
+ * - RPC request
+ * - Trace change
+ *
+ * The signal queue consists of three parts:
+ * - Outer queue (sig_inq field in process struct)
+ * - Middle queue (sig_qs field in process struct)
+ * - Inner queue (sig_qs field in process struct)
+ *
+ * Incoming signals are placed in the outer queue
+ * by other processes, ports, or by the runtime system
+ * itself. This queue is protected by the msgq process
+ * lock and may be accessed by any other entity. While
+ * a signal is located in the outer queue, it is still
+ * in transit between sender and receiver.
+ *
+ * The middle and the inner queues are private to the
+ * receiving process and can only be accessed while
+ * holding the main process lock. The signal changes
+ * from being in transit to being received while in
+ * the middle queue. Non-message signals are handled
+ * immediately upon reception while message signals
+ * are moved into the inner queue.
+ *
+ * In the outer and middle queues, message signals and
+ * non-message signals are mixed. Signals in these
+ * queues are referenced using two singly linked lists:
+ * one that goes through all signals in the queue, and
+ * another that goes through only the non-message
+ * signals. The list through the non-message signals
+ * provides fast access to these signals in the middle
+ * queue, since they should be handled immediately upon
+ * reception.
+ *
+ * The inner queue consists of a single linked list
+ * through the message signals only. A receive
+ * expression can only operate on messages once they
+ * have entered the inner queue.
+ *
+ * Author: Rickard Green
+ */
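+
+/*
+ * A rough sketch of the flow described above (informal;
+ * field names as used in the process struct):
+ *
+ *   senders ---> outer queue (sig_inq; msgq lock)
+ *                    |  fetch
+ *                    v
+ *                middle queue (sig_qs.cont; main lock)
+ *                    |  non-message signals handled here,
+ *                    |  message signals passed on
+ *                    v
+ *                inner queue (sig_qs.first; main lock)
+ *                    |
+ *                    v
+ *                receive expression
+ */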
+
+#ifndef ERTS_PROC_SIG_QUEUE_H_TYPE__
+#define ERTS_PROC_SIG_QUEUE_H_TYPE__
+
+#if 0
+# define ERTS_PROC_SIG_HARD_DEBUG
+#endif
+#if 0
+# define ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+#endif
+
+struct erl_mesg;
+
+typedef struct {
+ struct erl_mesg *next;
+ union {
+ struct erl_mesg **next;
+ void *attachment;
+ } specific;
+ Eterm tag;
+} ErtsSignalCommon;
+
+#define ERTS_SIG_HANDLE_REDS_MAX_PREFERED (CONTEXT_REDS/40)
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(P) \
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((P), "")
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P, QL) \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((P), (QL), "")
+# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__(P, What) \
+ erts_proc_sig_hdbg_check_in_queue((P), (What), __FILE__, __LINE__)
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, QL, What) \
+ erts_proc_sig_hdbg_check_priv_queue((P), (QL), (What), __FILE__, __LINE__)
+struct process;
+void erts_proc_sig_hdbg_check_priv_queue(struct process *c_p, int qlock,
+ char *what, char *file, int line);
+void erts_proc_sig_hdbg_check_in_queue(struct process *c_p, char *what,
+ char *file, int line);
+#else
+# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(P)
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P, QL)
+# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__(P, What)
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, QL, What)
+#endif
+
+#endif
+
+#if !defined(ERTS_PROC_SIG_QUEUE_H__) && !defined(ERTS_PROC_SIG_QUEUE_TYPE_ONLY)
+#define ERTS_PROC_SIG_QUEUE_H__
+
+#define ERTS_SIG_Q_OP_BITS 8
+#define ERTS_SIG_Q_OP_SHIFT 0
+#define ERTS_SIG_Q_OP_MASK ((1 << ERTS_SIG_Q_OP_BITS) - 1)
+
+#define ERTS_SIG_Q_TYPE_BITS 8
+#define ERTS_SIG_Q_TYPE_SHIFT ERTS_SIG_Q_OP_BITS
+#define ERTS_SIG_Q_TYPE_MASK ((1 << ERTS_SIG_Q_TYPE_BITS) - 1)
+
+#define ERTS_SIG_Q_NON_X_BITS__ (_HEADER_ARITY_OFFS \
+ + ERTS_SIG_Q_OP_BITS \
+ + ERTS_SIG_Q_TYPE_BITS)
+
+#define ERTS_SIG_Q_XTRA_BITS (32 - ERTS_SIG_Q_NON_X_BITS__)
+#define ERTS_SIG_Q_XTRA_SHIFT (ERTS_SIG_Q_OP_BITS \
+ + ERTS_SIG_Q_TYPE_BITS)
+#define ERTS_SIG_Q_XTRA_MASK ((1 << ERTS_SIG_Q_XTRA_BITS) - 1)
+
+
+#define ERTS_PROC_SIG_OP(Tag) \
+ ((int) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_OP_SHIFT) & ERTS_SIG_Q_OP_MASK)
+
+#define ERTS_PROC_SIG_TYPE(Tag) \
+ ((Uint16) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_TYPE_SHIFT) & ERTS_SIG_Q_TYPE_MASK)
+
+#define ERTS_PROC_SIG_XTRA(Tag) \
+ ((Uint32) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_XTRA_SHIFT) & ERTS_SIG_Q_XTRA_MASK)
+
+#define ERTS_PROC_SIG_MAKE_TAG(Op, Type, Xtra) \
+ (ASSERT(0 <= (Xtra) && (Xtra) <= ERTS_SIG_Q_XTRA_MASK), \
+ _make_header((((Type) & ERTS_SIG_Q_TYPE_MASK) \
+ << ERTS_SIG_Q_TYPE_SHIFT) \
+ | (((Op) & ERTS_SIG_Q_OP_MASK) \
+ << ERTS_SIG_Q_OP_SHIFT) \
+ | (((Xtra) & ERTS_SIG_Q_XTRA_MASK) \
+ << ERTS_SIG_Q_XTRA_SHIFT), \
+ _TAG_HEADER_EXTERNAL_PID))
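+
+/*
+ * Illustrative sketch of the tag encoding (op/type constants are
+ * assumed to be in range; the exact bit positions depend on
+ * _HEADER_ARITY_OFFS):
+ *
+ *   Eterm tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR,
+ *                                      ERTS_MON_TYPE_PROC, 0);
+ *   ASSERT(ERTS_PROC_SIG_OP(tag) == ERTS_SIG_Q_OP_MONITOR);
+ *   ASSERT(ERTS_PROC_SIG_TYPE(tag) == ERTS_MON_TYPE_PROC);
+ *   ASSERT(ERTS_PROC_SIG_XTRA(tag) == 0);
+ */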
+
+
+/*
+ * ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK is not an actual
+ * operation. We keep it at the top of the OP range,
+ * larger than ERTS_SIG_Q_OP_MAX.
+ */
+#define ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK ERTS_SIG_Q_OP_MASK
+
+#define ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK \
+ ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK,0,0)
+
+struct dist_entry_;
+
+/*
+ * Send operations of currently supported process signals follow...
+ */
+
+/**
+ *
+ * @brief Send an exit signal to a process.
+ *
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of local process
+ * to send signal to.
+ *
+ * @param[in] reason Exit reason.
+ *
+ * @param[in] token Seq trace token.
+ *
+ * @param[in] normal_kills If non-zero, an exit with reason
+ * 'normal' will also kill the receiver
+ * if it is not trapping exits.
+ *
+ */
+void
+erts_proc_sig_send_exit(Process *c_p, Eterm from, Eterm to,
+ Eterm reason, Eterm token, int normal_kills);
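+
+/*
+ * A minimal usage sketch (hypothetical caller; assumes 'to'
+ * holds a valid local pid term):
+ *
+ *   erts_proc_sig_send_exit(c_p, c_p->common.id, to,
+ *                           am_kill, NIL, 0);
+ *
+ * which is roughly what an exit(To, kill) sent from c_p
+ * itself boils down to.
+ */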
+
+/**
+ *
+ * @brief Send an exit signal due to broken link to a process.
+ *
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] lnk Pointer to link structure
+ * from the sending side. It
+ * should contain information
+ * about the receiver.
+ *
+ * @param[in] reason Exit reason.
+ *
+ * @param[in] token Seq trace token.
+ *
+ */
+void
+erts_proc_sig_send_link_exit(Process *c_p, Eterm from, ErtsLink *lnk,
+ Eterm reason, Eterm token);
+
+/**
+ *
+ * @brief Send a link signal to a process.
+ *
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] lnk Pointer to link structure to
+ * insert on the receiver side.
+ *
+ * @return A non-zero value if the
+ * signal was successfully
+ * sent. If zero, the signal
+ * was not sent because the
+ * receiver does not exist,
+ * and the sender needs to
+ * deallocate the link
+ * structure.
+ *
+ */
+int
+erts_proc_sig_send_link(Process *c_p, Eterm to, ErtsLink *lnk);
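+
+/*
+ * Hedged sketch of the ownership contract (assumes 'lnk' was
+ * created by the caller; erts_link_release() is assumed here to
+ * be the matching deallocation routine, see erl_monitor_link.h):
+ *
+ *   if (!erts_proc_sig_send_link(c_p, to, lnk)) {
+ *       -- receiver did not exist; caller still owns lnk
+ *       erts_link_release(lnk);
+ *   }
+ */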
+
+/**
+ *
+ * @brief Send an unlink signal to a process.
+ *
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] lnk Pointer to link structure from
+ * the sending side. It should
+ * contain information about
+ * the receiver.
+ */
+void
+erts_proc_sig_send_unlink(Process *c_p, ErtsLink *lnk);
+
+/**
+ *
+ * @brief Send an exit signal due to broken link to a process.
+ *
+ * This function is used instead of erts_proc_sig_send_link_exit()
+ * when the signal arrives via the distribution and
+ * no link structure is available.
+ *
+ * @param[in] dep Distribution entry of channel
+ * that the signal arrived on.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] reason Exit reason.
+ *
+ * @param[in] token Seq trace token.
+ *
+ */
+void
+erts_proc_sig_send_dist_link_exit(struct dist_entry_ *dep,
+ Eterm from, Eterm to,
+ Eterm reason, Eterm token);
+
+/**
+ *
+ * @brief Send an unlink signal to a process.
+ *
+ * This function is used instead of erts_proc_sig_send_unlink()
+ * when the signal arrives via the distribution and
+ * no link structure is available.
+ *
+ * @param[in] dep Distribution entry of channel
+ * that the signal arrived on.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ */
+void
+erts_proc_sig_send_dist_unlink(struct dist_entry_ *dep,
+ Eterm from, Eterm to);
+
+/**
+ *
+ * @brief Send a monitor down signal to a process.
+ *
+ * @param[in] mon Pointer to target monitor
+ * structure from the sending
+ * side. It should contain
+ * information about the receiver.
+ *
+ * @param[in] reason Exit reason.
+ *
+ */
+void
+erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason);
+
+/**
+ *
+ * @brief Send a demonitor signal to a process.
+ *
+ * @param[in] mon Pointer to origin monitor
+ * structure from the sending
+ * side. It should contain
+ * information about the receiver.
+ *
+ */
+void
+erts_proc_sig_send_demonitor(ErtsMonitor *mon);
+
+/**
+ *
+ * @brief Send a monitor signal to a process.
+ *
+ * @param[in] mon Pointer to target monitor
+ * structure to insert on
+ * the receiver side.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @return A non-zero value if the
+ * signal was successfully
+ * sent. If zero, the signal
+ * was not sent because the
+ * receiver does not exist,
+ * and the sender needs to
+ * deallocate the monitor
+ * structure.
+ *
+ */
+int
+erts_proc_sig_send_monitor(ErtsMonitor *mon, Eterm to);
+
+/**
+ *
+ * @brief Send a monitor down signal to a process.
+ *
+ * This function is used instead of erts_proc_sig_send_monitor_down()
+ * when the signal arrives via the distribution and
+ * no monitor structure is available.
+ *
+ * @param[in] dep Pointer to distribution entry
+ * of channel that the signal
+ * arrived on.
+ *
+ * @param[in] ref Reference identifying the monitor.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] reason Exit reason.
+ *
+ */
+void
+erts_proc_sig_send_dist_monitor_down(DistEntry *dep, Eterm ref,
+ Eterm from, Eterm to,
+ Eterm reason);
+
+/**
+ *
+ * @brief Send a demonitor signal to a process.
+ *
+ * This function is used instead of erts_proc_sig_send_demonitor()
+ * when the signal arrives via the distribution and
+ * no monitor structure is available.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] ref Reference identifying the monitor.
+ *
+ */
+void
+erts_proc_sig_send_dist_demonitor(Eterm to, Eterm ref);
+
+/**
+ *
+ * @brief Send a persistent monitor triggered signal to a process.
+ *
+ * Used by monitors that are not auto-disabled, such as, for
+ * example, 'time_offset' monitors.
+ *
+ * @param[in] type Monitor type.
+ *
+ * @param[in] key Monitor key.
+ *
+ * @param[in] from Identifier of sender.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] msg Message template.
+ *
+ * @param[in] msg_sz Heap size of message template.
+ *
+ */
+void
+erts_proc_sig_send_persistent_monitor_msg(Uint16 type, Eterm key,
+ Eterm from, Eterm to,
+ Eterm msg, Uint msg_sz);
+
+/**
+ *
+ * @brief Send a trace change signal to a process.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] on Trace flags to enable.
+ *
+ * @param[in] off Trace flags to disable.
+ *
+ * @param[in] tracer Tracer to set. If THE_NON_VALUE,
+ * the tracer will not be changed.
+ *
+ */
+void
+erts_proc_sig_send_trace_change(Eterm to, Uint on, Uint off,
+ Eterm tracer);
+
+/**
+ *
+ * @brief Send a group leader signal to a process.
+ *
+ * Set the group leader of the receiving process. If sent locally,
+ * a response message '{Ref, Result}' is sent to the original
+ * sender when the request has been performed, where Ref is the
+ * reference passed as the 'ref' argument, and Result is either
+ * 'true' or 'badarg'.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ * NULL if signal arrived via
+ * distribution.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] gl Identifier of new group leader.
+ *
+ * @param[in] ref Reference to use in response
+ * message to locally sending
+ * process (i.e., c_p when c_p
+ * is non-null).
+ *
+ */
+void
+erts_proc_sig_send_group_leader(Process *c_p, Eterm to, Eterm gl,
+ Eterm ref);
+
+/**
+ *
+ * @brief Send an 'is process alive' signal to a process.
+ *
+ * A response message '{Ref, Result}' is sent to the
+ * sender when the request has been performed, where Ref is the
+ * reference passed as the 'ref' argument, and Result is either
+ * 'true' or 'false'.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ * NULL if signal arrived via
+ * distribution.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] ref Reference to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ */
+void
+erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to,
+ Eterm ref);
+
+/**
+ *
+ * @brief Send a 'process info request' signal to a process.
+ *
+ * A response message '{Ref, Result}' is sent to the
+ * sender when the request has been performed, where Ref is the
+ * reference passed as the 'ref' argument, and Result corresponds
+ * to the return value of erlang:process_info/[1,2].
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ * NULL if signal arrived via
+ * distribution.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] item_ix Info index array to pass to
+ * erts_process_info()
+ *
+ * @param[in] len Length of the info index array
+ *
+ * @param[in] need_msgq_len Non-zero if message queue
+ * length is needed; otherwise,
+ * zero. If non-zero, sig_qs.len
+ * will be set to correspond
+ * to the message queue length
+ * before call to
+ * erts_process_info()
+ *
+ * @param[in] flags Flags to pass to
+ * erts_process_info()
+ *
+ * @param[in] reserve_size Heap size that is known to
+ * be needed. May not be correct
+ * though.
+ *
+ * @param[in] ref Reference to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ */
+int
+erts_proc_sig_send_process_info_request(Process *c_p,
+ Eterm to,
+ int *item_ix,
+ int len,
+ int need_msgq_len,
+ int flags,
+ Uint reserve_size,
+ Eterm ref);
+
+/**
+ *
+ * @brief Send a 'sync suspend' signal to a process.
+ *
+ * A response message '{Tag, Reply}' is sent to the
+ * sender when the request has been performed, where Tag
+ * is the term passed as the 'tag' argument. If the
+ * operation is asynchronous, Reply is either 'suspended',
+ * 'not_suspended', or 'exited'; otherwise, Reply is the
+ * 'reply' argument, or 'badarg' if the process terminated.
+ *
+ * This signal does *not* change the suspend state; it only
+ * reads and replies with the state. This signal is typically
+ * sent after a suspend request (monitor of suspend type)
+ * signal has been sent to the process, in order to get a
+ * response when the suspend monitor has been processed.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] tag Tag to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ * @param[in] reply Reply to send if this
+ * is a synchronous operation;
+ * otherwise, THE_NON_VALUE.
+ */
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to,
+ Eterm tag, Eterm reply);
+
+/**
+ *
+ * @brief Send an 'rpc' signal to a process.
+ *
+ * The function 'func' will be executed in the
+ * context of the receiving process. A response
+ * message '{Ref, Result}' is sent to the sender
+ * when 'func' has been called. 'Ref' is the reference
+ * returned by this function and 'Result' is the
+ * term returned by 'func'. If the return value of
+ * 'func' is not an immediate term, 'func' has to
+ * allocate a heap fragment where the result is stored
+ * and update the heap fragment pointer pointer
+ * passed as its last argument to point to it.
+ *
+ * If this function returns a reference, 'func' will
+ * be called in the context of the receiver. However,
+ * note that this might happen when the receiver is in
+ * an exiting state. The caller of this function
+ * *unconditionally* has to enter a receive that match
+ * on the returned reference in all clauses as next
+ * receive; otherwise, bad things will happen!
+ *
+ * If THE_NON_VALUE is returned, the receiver did not
+ * exist. The signal was not sent, and no specific
+ * receive has to be entered by the caller.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver process.
+ *
+ * @param[in] reply Non-zero if a reply is wanted.
+ *
+ * @param[in] func Function to execute in the
+ * context of the receiver.
+ * First argument will be a
+ * pointer to the process struct
+ * of the receiver process.
+ * Second argument will be 'arg'
+ * (see below). The last argument
+ * will be a pointer to a pointer
+ * to a heap fragment for storage
+ * of the result returned from
+ * 'func' (i.e., an 'out' parameter).
+ *
+ * @param[in] arg Void pointer to argument
+ * to pass as second argument
+ * in call of 'func'.
+ *
+ * @returns If the request was sent,
+ * an internal ordinary
+ * reference; otherwise,
+ * THE_NON_VALUE (non-existing
+ * receiver).
+ */
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg);
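+
+/*
+ * A hedged sketch of the calling convention (hypothetical
+ * callback; the 'int *' argument is ignored here since its
+ * exact role is not documented above):
+ *
+ *   static Eterm
+ *   my_rpc(Process *rp, void *arg, int *ip, ErlHeapFragment **bpp)
+ *   {
+ *       (void) rp; (void) arg; (void) ip; (void) bpp;
+ *       return am_ok;  -- immediate term; no heap fragment needed
+ *   }
+ *
+ *   Eterm ref = erts_proc_sig_send_rpc_request(c_p, to, !0,
+ *                                              my_rpc, NULL);
+ *   if (is_non_value(ref))
+ *       -- receiver did not exist; no receive needs to be entered
+ *   else
+ *       -- caller must next enter a receive matching {ref, Result}
+ */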
+
+/*
+ * End of send operations of currently supported process signals.
+ */
+
+
+/**
+ *
+ * @brief Handle incoming signals.
+ *
+ * Called by an ordinary scheduler in order to handle incoming
+ * signals for a process. The work is done on the middle part
+ * of the signal queue. The maximum amount of signals handled
+ * is limited by the amount of reductions given when calling.
+ * Note that a reduction does not necessarily map to a signal.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[out] statep Pointer to process state after
+ * signal handling. May not be NULL.
+ *
+ * @param[in,out] redsp Pointer to an integer containing
+ * reductions. On input, the amount
+ * of preferred reductions to be
+ * used by the call. On output, the
+ * amount of reductions consumed.
+ *
+ * @param[in] max_reds Absolute maximum of reductions
+ * to use. If the process cannot
+ * make progress after the preferred
+ * amount of reductions has been
+ * consumed, signal handling may
+ * proceed up to a maximum of
+ * 'max_reds' in order to make
+ * the process able to proceed
+ * with other tasks after handling
+ * has finished.
+ *
+ * @param[in] local_only If is zero, new signals may be
+ * fetched from the outer queue and
+ * put in the middle queue before
+ * signal handling is performed. If
+ * non-zero, no new signals will be
+ * fetched before handling begins.
+ *
+ * @return Returns a non-zero value when
+ * no signals remain to be handled
+ * in the middle queue. A zero
+ * return value means that signals
+ * remain in the middle queue.
+ */
+int
+erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
+ int *redsp, int max_reds,
+ int local_only);
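+
+/*
+ * Usage sketch modeled on the dirty-signal-handler BIF in
+ * erl_proc_sig_queue.c (erts_internal_dirty_process_handle_signals_1):
+ *
+ *   erts_aint32_t state;
+ *   int reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ *   int done = erts_proc_sig_handle_incoming(rp, &state, &reds,
+ *                                            reds, 0);
+ *   -- 'reds' now holds the reductions consumed; 'done' is
+ *   -- non-zero when the middle queue has been emptied.
+ */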
+
+/**
+ *
+ * @brief Handle remaining signals for an exiting process
+ *
+ * Called as part of termination of a process. It will handle
+ * remaining signals.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in,out] redsp Pointer to an integer containing
+ * reductions. On input, the maximum
+ * amount of reductions to be
+ * used by the call. On output, the
+ * amount of reductions consumed.
+ *
+ * @return Returns a non-zero value when
+ * no signals remain to be handled
+ * in the middle queue. A zero
+ * return value means that signals
+ * remain in the middle queue.
+ */
+int
+erts_proc_sig_handle_exit(Process *c_p, int *redsp);
+
+/**
+ *
+ * @brief Helper for loop_rec instruction.
+ *
+ * This function should only be called from the loop_rec
+ * instruction (or equivalents). It is called when loop_rec
+ * reaches the end of the inner queue (which is the only
+ * part of the signal queue that receive is allowed to
+ * operate on). When called, this function tries to make
+ * more messages available in the inner queue, by
+ * fetching signals from the outer queue to the middle
+ * queue and/or by processing signals in the middle queue.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] fcalls Content of FCALLS in
+ * process_main()
+ *
+ * @param[in] neg_o_reds Content of neg_o_reds in
+ * process_main()
+ *
+ * @param[out] msgpp Pointer to pointer to next
+ * available message to process.
+ * If *msgpp == NULL, no more
+ * messages are available.
+ *
+ * @param[out] get_outp Pointer to an integer
+ * indicating how to respond
+ * if no more messages are
+ * available (msgpp). If the
+ * integer is set to zero,
+ * loop_rec should jump to an
+ * appropriate wait instruction;
+ * in this case the message queue
+ * lock remains locked, since the
+ * test for more messages has
+ * already been done. If the
+ * integer is set to a value
+ * larger than zero, the process
+ * exited. If the integer is set
+ * to a value less than zero, the
+ * process is required to yield.
+ *
+ *
+ * @return The amount of reductions
+ * consumed.
+ *
+ */
+int
+erts_proc_sig_receive_helper(Process *c_p, int fcalls,
+ int neg_o_reds, ErtsMessage **msgpp,
+ int *get_outp);
+
+/**
+ *
+ * @brief Fetch signals from the outer queue
+ *
+ * Fetches signals from outer queue and places them in the
+ * middle queue ready for signal handling. If the middle
+ * queue is empty, only message signals were present in the
+ * outer queue, and no receive tracing has been enabled on
+ * the process, the middle queue is bypassed and messages
+ * are delivered directly to the inner queue instead.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ * @returns Amount of message signals in
+ * inner plus middle signal
+ * queues after fetch completed
+ * (NOT the message queue
+ * length).
+ */
+ERTS_GLB_INLINE Sint erts_proc_sig_fetch(Process *p);
+
+/**
+ *
+ * @brief Get amount of messages in private queues
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @returns Amount of message signals in
+ * the inner plus middle signal
+ * queues (NOT the message queue
+ * length).
+ */
+Sint
+erts_proc_sig_privqs_len(Process *c_p);
+
+
+/**
+ * @brief Enqueue list of signals on process.
+ *
+ * Message queue must be locked on receiving process.
+ *
+ * @param rp Receiving process.
+ * @param first First signal in list.
+ * @param last Last signal in list.
+ * @param last_next Pointer to next-pointer to last non-message signal
+ * or NULL if no non-message signal after 'first'.
+ * @param msg_cnt Number of message signals in list.
+ * @param in_state 'state' of rp.
+ *
+ * @return 'state' of rp.
+ */
+erts_aint32_t
+erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint msg_cnt,
+ erts_aint32_t in_state);
+
+/**
+ *
+ * @brief Flush pending signals.
+ *
+ */
+void
+erts_proc_sig_send_pending(ErtsSchedulerData* esdp);
+
+/**
+ *
+ * @brief Schedule process to handle enqueued signal(s).
+ *
+ * @param rp Receiving process.
+ * @param state 'state' of rp.
+ * @param enable_flag Additional state flags to enable, like
+ * ERTS_PSFLG_ACTIVE if message has been enqueued.
+ */
+ERTS_GLB_INLINE void erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag);
+
+void erts_make_dirty_proc_handled(Eterm pid, erts_aint32_t state,
+ erts_aint32_t prio);
+
+
+typedef struct {
+ Uint size;
+ ErtsMessage *msgp;
+} ErtsMessageInfo;
+
+/**
+ *
+ * @brief Prepare signal queue for inspection by process_info()
+ *
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] rp Pointer to process struct of
+ * process to inspect.
+ *
+ * @param[in] rp_locks Process locks held on 'rp'.
+ *
+ * @param[in] info_on_self Integer set to non-zero value
+ * if caller is inspecting itself;
+ * otherwise, zero.
+ *
+ * @param[in] mip Pointer to array of
+ * ErtsMessageInfo structures.
+ *
+ * @returns Total heap size needed for all
+ * messages that need to be copied.
+ */
+Uint erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ int info_on_self,
+ ErtsMessageInfo *mip);
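+
+/*
+ * Hedged usage sketch (assumes the caller allocated 'mip' with
+ * room for one entry per message in the queue):
+ *
+ *   Uint heap_sz = erts_proc_sig_prep_msgq_for_inspection(
+ *       c_p, rp, rp_locks, c_p == rp, mip);
+ *   -- heap_sz is the total heap size needed to copy all
+ *   -- messages; mip[i].msgp and mip[i].size describe each one.
+ */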
+
+/**
+ *
+ * @brief Move message data of messages in private queues to heap
+ *
+ * Move message data of messages in private queues to the heap.
+ * This is part of the GC of processes that use on-heap message
+ * data.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ */
+void erts_proc_sig_move_msgs_to_heap(Process *c_p);
+
+/**
+ *
+ * @brief Size of signal in bytes.
+ *
+ * @param[in] sig Signal to inspect.
+ *
+ */
+Uint erts_proc_sig_signal_size(ErtsSignal *sig);
+
+
+/**
+ *
+ * @brief Clear seq trace tokens on all signals
+ *
+ * Assumes thread progress has been blocked!
+ *
+ * @param[in] c_p Pointer to process
+ *
+ */
+void
+erts_proc_sig_clear_seq_trace_tokens(Process *c_p);
+
+/**
+ *
+ * @brief Handle pending suspend requests
+ *
+ * Should be called by processes when they stop
+ * execution on a dirty scheduler if they have
+ * pending suspend requests (i.e. when
+ * ERTS_PROC_GET_PENDING_SUSPEND(c_p) != NULL).
+ *
+ * @param[in] c_p Pointer to executing
+ * process
+ */
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p);
+
+/**
+ * @brief Initialize this functionality
+ */
+void erts_proc_sig_queue_init(void);
+
+void
+erts_proc_sig_debug_foreach_sig(Process *c_p,
+ void (*msg_func)(ErtsMessage *, void *),
+ void (*oh_func)(ErlOffHeap *, void *),
+ void (*mon_func)(ErtsMonitor *, void *),
+ void (*lnk_func)(ErtsLink *, void *),
+ void *arg);
+
+extern Process *erts_dirty_process_signal_handler;
+extern Process *erts_dirty_process_signal_handler_high;
+extern Process *erts_dirty_process_signal_handler_max;
+
+void erts_proc_sig_fetch__(Process *proc);
+Sint erts_proc_sig_fetch_msgq_len_offs__(Process *proc);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Sint
+erts_proc_sig_fetch(Process *proc)
+{
+ Sint res = 0;
+ ErtsSignal *sig;
+
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
+ || ERTS_PROC_IS_EXITING(proc)
+ || ((erts_proc_lc_my_proc_locks(proc)
+ & (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_MSGQ))
+ == (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_MSGQ)));
+
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(proc);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc, !0);
+
+ sig = (ErtsSignal *) proc->sig_inq.first;
+ if (sig) {
+ if (ERTS_LIKELY(sig->common.tag != ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK))
+ erts_proc_sig_fetch__(proc);
+ else
+ res = erts_proc_sig_fetch_msgq_len_offs__(proc);
+ }
+
+ res += proc->sig_qs.len;
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc, !0);
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ proc, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(res == len);
+ }
+#endif
+
+ return res;
+}
+
+ERTS_GLB_INLINE void
+erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag)
+{
+ if (~(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SIG_IN_Q))
+ | (~state & enable_flag)) {
+ /* Schedule process... */
+ state = erts_proc_sys_schedule(rp, state, enable_flag);
+ }
+
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(rp->common.id, state, -1);
+ }
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_PROC_SIG_QUEUE_H__ */
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index bc59147c6c..2427d87f66 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,8 @@
# include "config.h"
#endif
+#define ERTS_WANT_BREAK_HANDLING
+
#include <stddef.h> /* offsetof() */
#include "sys.h"
#include "erl_vm.h"
@@ -34,7 +36,6 @@
#include "erl_db.h"
#include "dist.h"
#include "beam_catches.h"
-#include "erl_instrument.h"
#include "erl_threads.h"
#include "erl_binary.h"
#include "beam_bp.h"
@@ -48,6 +49,10 @@
#include "erl_bif_unique.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_nfunc_sched.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_proc_sig_queue.h"
#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -106,42 +111,31 @@
#define LOW_BIT (1 << PRIORITY_LOW)
#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL)
-#define ERTS_EMPTY_RUNQ(RQ) \
- ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \
- && (RQ)->misc.start == NULL)
+#define ERTS_IS_RUNQ_EMPTY_FLGS(FLGS) \
+ (!((FLGS) & (ERTS_RUNQ_FLGS_QMASK|ERTS_RUNQ_FLG_MISC_OP)))
-#undef RUNQ_READ_RQ
-#undef RUNQ_SET_RQ
-#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X)))
-#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ))
+#define ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(FLGS) \
+ (!((FLGS) & (PORT_BIT|ERTS_RUNQ_FLG_MISC_OP)))
-#ifdef DEBUG
-# if defined(ARCH_64)
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4)))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \
- ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000LL));\
-} while (0)
-# else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (RUNQ_SET_RQ((RQP), (0xdead0003 | ((N) << 4))))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \
- ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \
-} while (0)
-# endif
-#else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N)
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
-#endif
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
- (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
+ ERTS_IS_RUNQ_EMPTY_FLGS(ERTS_RUNQ_FLGS_GET_NOB((RQ)))
+
+static ERTS_INLINE int
+runq_got_work_to_execute_flags(Uint32 flags)
+{
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ return !ERTS_IS_RUNQ_EMPTY_PORTS_FLGS(flags);
+ return !ERTS_IS_RUNQ_EMPTY_FLGS(flags);
+}
+
+static ERTS_INLINE int
+runq_got_work_to_execute(ErtsRunQueue *rq)
+{
+ return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
+}
const Process erts_invalid_process = {{ERTS_INVALID_PID}};
@@ -150,10 +144,10 @@ extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ;
-int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_total_schedulers);
Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers) = 0;
Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers) = 0;
@@ -171,131 +165,112 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
-
+int erts_dcpu_sched_thread_suggested_stack_size = -1;
+int erts_dio_sched_thread_suggested_stack_size = -1;
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
-static struct {
+typedef struct {
int aux_work;
int tse;
- int sys_schedule;
-} sched_busy_wait;
+} ErtsBusyWaitParams;
-#ifdef ERTS_SMP
-int erts_disable_proc_not_running_opt;
+static ErtsBusyWaitParams sched_busy_wait_params[ERTS_SCHED_TYPE_LAST + 1];
+
+static ERTS_INLINE ErtsBusyWaitParams *
+sched_get_busy_wait_params(ErtsSchedulerData *esdp)
+{
+ return &sched_busy_wait_params[esdp->type];
+}
static ErtsAuxWorkData *aux_thread_aux_work_data;
+static ErtsAuxWorkData *poll_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_NMSB (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3)
-typedef enum {
- ERTS_SCHED_NORMAL,
- ERTS_SCHED_DIRTY_CPU,
- ERTS_SCHED_DIRTY_IO
-} ErtsSchedType;
-
-typedef struct {
+typedef struct ErtsMultiSchedulingBlock_ {
int ongoing;
ErtsProcList *blckrs;
ErtsProcList *chngq;
} ErtsMultiSchedulingBlock;
-static struct {
- erts_smp_mtx_t mtx;
- Uint32 online;
- Uint32 curr_online;
- Uint32 active;
- erts_smp_atomic32_t changing;
+typedef struct ErtsSchedTypeCounters_ {
+ Uint32 normal;
+ Uint32 dirty_cpu;
+ Uint32 dirty_io;
+} ErtsSchedTypeCounters;
+
+static struct ErtsSchedSuspend_ {
+ erts_mtx_t mtx;
+ ErtsSchedTypeCounters online;
+ ErtsSchedTypeCounters curr_online;
+ ErtsSchedTypeCounters active;
+ erts_atomic32_t changing;
ErtsProcList *chngq;
Eterm changer;
ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */
ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */
+ ErtsSchedType last_msb_dirty_type;
} schdlr_sspnd;
-#define ERTS_SCHDLR_SSPND_S_BITS 10
-#define ERTS_SCHDLR_SSPND_DCS_BITS 11
-#define ERTS_SCHDLR_SSPND_DIS_BITS 11
-
-#define ERTS_SCHDLR_SSPND_S_MASK ((1 << ERTS_SCHDLR_SSPND_S_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DCS_MASK ((1 << ERTS_SCHDLR_SSPND_DCS_BITS)-1)
-#define ERTS_SCHDLR_SSPND_DIS_MASK ((1 << ERTS_SCHDLR_SSPND_DIS_BITS)-1)
-
-#define ERTS_SCHDLR_SSPND_S_SHIFT 0
-#define ERTS_SCHDLR_SSPND_DCS_SHIFT (ERTS_SCHDLR_SSPND_S_SHIFT \
- + ERTS_SCHDLR_SSPND_S_BITS)
-#define ERTS_SCHDLR_SSPND_DIS_SHIFT (ERTS_SCHDLR_SSPND_DCS_SHIFT \
- + ERTS_SCHDLR_SSPND_DCS_BITS)
-
-#if (ERTS_SCHDLR_SSPND_S_BITS \
- + ERTS_SCHDLR_SSPND_DCS_BITS \
- + ERTS_SCHDLR_SSPND_DIS_BITS) > 32
-# error Wont fit in Uint32
-#endif
-
-#if (ERTS_MAX_NO_OF_SCHEDULERS-1) > ERTS_SCHDLR_SSPND_S_MASK
-# error Max no schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS > ERTS_SCHDLR_SSPND_DCS_MASK
-# error Max no dirty cpu schedulers wont fit in its bit-field
-#endif
-#if ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS > ERTS_SCHDLR_SSPND_DIS_MASK
-# error Max no dirty io schedulers wont fit in its bit-field
-#endif
-
-#define ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(S, DCS, DIS) \
- ((((Uint32) (((S) & ERTS_SCHDLR_SSPND_S_MASK))-1) \
- << ERTS_SCHDLR_SSPND_S_SHIFT) \
- | ((((Uint32) ((DCS) & ERTS_SCHDLR_SSPND_DCS_MASK)) \
- << ERTS_SCHDLR_SSPND_DCS_SHIFT)) \
- | ((((Uint32) ((DIS) & ERTS_SCHDLR_SSPND_DIS_MASK)) \
- << ERTS_SCHDLR_SSPND_DIS_SHIFT)))
-
static void init_scheduler_suspend(void);
static ERTS_INLINE Uint32
-schdlr_sspnd_get_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p)
+{
+ int res = val1p->normal == val2p->normal;
+ res &= val1p->dirty_cpu == val2p->dirty_cpu;
+ res &= val1p->dirty_io == val2p->dirty_io;
+ return res;
+}
+
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
- Uint32 res = (Uint32) (*valp);
switch (type) {
case ERTS_SCHED_NORMAL:
- res >>= ERTS_SCHDLR_SSPND_S_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_S_MASK;
- res++;
- break;
+ return valp->normal;
case ERTS_SCHED_DIRTY_CPU:
- res >>= ERTS_SCHDLR_SSPND_DCS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DCS_MASK;
- break;
+ return valp->dirty_cpu;
case ERTS_SCHED_DIRTY_IO:
- res >>= ERTS_SCHDLR_SSPND_DIS_SHIFT;
- res &= (Uint32) ERTS_SCHDLR_SSPND_DIS_MASK;
- break;
+ return valp->dirty_io;
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return 0;
}
+}
+#ifdef DEBUG
+static ERTS_INLINE Uint32
+schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp)
+{
+ Uint32 res = valp->normal;
+ res += valp->dirty_cpu;
+ res += valp->dirty_io;
return res;
}
+#endif
static ERTS_INLINE void
-schdlr_sspnd_dec_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
ASSERT(schdlr_sspnd_get_nscheds(valp, type) > 0);
switch (type) {
case ERTS_SCHED_NORMAL:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal--;
break;
case ERTS_SCHED_DIRTY_CPU:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu--;
break;
case ERTS_SCHED_DIRTY_IO:
- *valp -= ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io--;
break;
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
@@ -303,23 +278,18 @@ schdlr_sspnd_dec_nscheds(Uint32 *valp, ErtsSchedType type)
}
static ERTS_INLINE void
-schdlr_sspnd_inc_nscheds(Uint32 *valp, ErtsSchedType type)
+schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type)
{
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_SCHEDULERS-1);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_S_SHIFT;
+ valp->normal++;
break;
case ERTS_SCHED_DIRTY_CPU:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_CPU_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DCS_SHIFT;
+ valp->dirty_cpu++;
break;
case ERTS_SCHED_DIRTY_IO:
- ASSERT(schdlr_sspnd_get_nscheds(valp, type)
- < ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
- *valp += ((Uint32) 1) << ERTS_SCHDLR_SSPND_DIS_SHIFT;
+ valp->dirty_io++;
break;
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
@@ -327,43 +297,30 @@ schdlr_sspnd_inc_nscheds(Uint32 *valp, ErtsSchedType type)
}
static ERTS_INLINE void
-schdlr_sspnd_set_nscheds(Uint32 *valp, ErtsSchedType type, Uint32 no)
+schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp,
+ ErtsSchedType type, Uint32 no)
{
- Uint32 val = *valp;
-
switch (type) {
case ERTS_SCHED_NORMAL:
- ASSERT(no > 0);
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_S_MASK)
- << ERTS_SCHDLR_SSPND_S_SHIFT);
- val |= (((no-1) & ((Uint32) ERTS_SCHDLR_SSPND_S_MASK))
- << ERTS_SCHDLR_SSPND_S_SHIFT);
+ valp->normal = no;
break;
case ERTS_SCHED_DIRTY_CPU:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK)
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DCS_MASK))
- << ERTS_SCHDLR_SSPND_DCS_SHIFT);
+ valp->dirty_cpu = no;
break;
case ERTS_SCHED_DIRTY_IO:
- val &= ~(((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK)
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
- val |= ((no & ((Uint32) ERTS_SCHDLR_SSPND_DIS_MASK))
- << ERTS_SCHDLR_SSPND_DIS_SHIFT);
+ valp->dirty_io = no;
break;
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
-
- *valp = val;
}
static struct {
- erts_smp_mtx_t update_mtx;
- erts_smp_atomic32_t no_runqs;
+ erts_mtx_t update_mtx;
+ erts_atomic32_t no_runqs;
int last_active_runqs;
int forced_check_balance;
- erts_smp_atomic32_t checking_balance;
+ erts_atomic32_t checking_balance;
int halftime;
int full_reds_history_index;
struct {
@@ -381,39 +338,72 @@ do { \
balance_info.prev_rise.reds = (REDS); \
} while (0)
-#endif
erts_sched_stat_t erts_sched_stat;
-#ifdef USE_THREADS
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
-#endif
-
-static erts_smp_atomic32_t function_calls;
-#ifdef ERTS_SMP
-static erts_smp_atomic32_t doing_sys_schedule;
-static erts_smp_atomic32_t no_empty_run_queues;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static erts_atomic32_t doing_sys_schedule;
+#endif
+static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
static erts_tid_t runq_supervisor_tid;
static erts_atomic_t runq_supervisor_sleeping;
-#else /* !ERTS_SMP */
-ErtsSchedulerData *erts_scheduler_data;
-#endif
-ErtsAlignedRunQueue *erts_aligned_run_queues;
-Uint erts_no_run_queues;
+ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
+Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
+
+
+struct {
+ union {
+ erts_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } cpu;
+ union {
+ erts_atomic32_t active;
+ char align__[ERTS_CACHE_LINE_SIZE];
+ } io;
+} dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+
+static ERTS_INLINE void
+dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add)
+{
+ erts_aint32_t val;
+ erts_atomic32_t *ap;
+ switch (esdp->type) {
+ case ERTS_SCHED_DIRTY_CPU:
+ ap = &dirty_count.cpu.active;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ ap = &dirty_count.io.active;
+ break;
+ default:
+ ap = NULL;
+ ERTS_INTERNAL_ERROR("Not a dirty scheduler");
+ break;
+ }
-ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_DIRTY_SCHEDULERS
-ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
-ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+ /*
+ * All updates done under run-queue lock, so
+ * no inc or dec needed...
+ */
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue));
+
+ val = erts_atomic32_read_nob(ap);
+ val += add;
+ erts_atomic32_set_nob(ap, val);
+}
+
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data);
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data);
+ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data);
typedef union {
Process dsp;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
} ErtsAlignedDirtyShadowProcess;
-#endif
typedef union {
ErtsSchedulerSleepInfo ssi;
@@ -421,12 +411,9 @@ typedef union {
} ErtsAlignedSchedulerSleepInfo;
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
-#endif
-#endif
+static ErtsAlignedSchedulerSleepInfo *aligned_poll_thread_sleep_info;
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -446,13 +433,14 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#endif
typedef enum {
- ERTS_PSTT_GC, /* Garbage Collect */
+ ERTS_PSTT_GC_MAJOR, /* Garbage Collect: Fullsweep */
+ ERTS_PSTT_GC_MINOR, /* Garbage Collect: Generational */
ERTS_PSTT_CPC, /* Check Process Code */
-#ifdef ERTS_NEW_PURGE_STRATEGY
ERTS_PSTT_CLA, /* Copy Literal Area */
-#endif
ERTS_PSTT_COHMQ, /* Change off heap message queue */
- ERTS_PSTT_FTMQ /* Flush trace msg queue */
+ ERTS_PSTT_FTMQ, /* Flush trace msg queue */
+ ERTS_PSTT_ETS_FREE_FIXATION,
+ ERTS_PSTT_PRIO_SIG /* Elevate prio on signal management */
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -494,11 +482,14 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
+#define ERTS_POLL_THREAD_SLEEP_INFO_IX(IX) \
+ (ASSERT(0 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_poll_threads)), \
+ &aligned_poll_thread_sleep_info[(IX)].ssi)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT(-1 <= ((int) (IX)) \
- && ((int) (IX)) < ((int) erts_no_schedulers)), \
+ (ASSERT(((int)-1) <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \
@@ -507,7 +498,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \
&aligned_dirty_io_sched_sleep_info[(IX)].ssi)
-#endif
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -515,9 +505,9 @@ do { \
int ix__; \
for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
- erts_smp_runq_unlock(RQVAR); \
+ erts_runq_unlock(RQVAR); \
} \
} while (0)
@@ -527,42 +517,47 @@ do { \
int ix__; \
int online__ = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, \
ERTS_SCHED_NORMAL); \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
for (ix__ = 0; ix__ < online__; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
- erts_smp_runq_unlock(RQVAR); \
+ erts_runq_unlock(RQVAR); \
} \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, DOX) \
+#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, NRQS, DO, DOX) \
do { \
ErtsRunQueue *RQVAR; \
+ int nrqs = (NRQS); \
int ix__; \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
+ for (ix__ = 0; ix__ < nrqs; ix__++) { \
RQVAR = ERTS_RUNQ_IX(ix__); \
- erts_smp_runq_lock(RQVAR); \
+ erts_runq_lock(RQVAR); \
{ DO; } \
} \
{ DOX; } \
- for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) \
- erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \
+ for (ix__ = 0; ix__ < nrqs; ix__++) \
+ erts_runq_unlock(ERTS_RUNQ_IX(ix__)); \
} while (0)
-#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
- ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, )
+#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS, DO, )
+
+#define ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, erts_no_run_queues, DO, )
+
+
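For context, a minimal usage sketch of the run-queue iteration macros above: every run-queue lock is held for the duration of the loop, so DO observes one consistent snapshot, and the _X variant's DOX body runs while all the locks are still held. erts_active_schedulers() later in this file uses the NORMAL variant exactly like this:

    /* Count schedulers that are not waiting, as one atomic snapshot
     * taken across all normal run queues. */
    Uint active = erts_no_schedulers;
    ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(rq, active -= abs(rq->waiting));
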
/*
* Local functions.
*/
static void exec_misc_ops(ErtsRunQueue *);
-static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
+static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x);
+static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
-static void aux_work_timeout_late_init(void);
static void setup_aux_work_timer(ErtsSchedulerData *esdp);
static int execute_sys_tasks(Process *c_p,
@@ -584,11 +579,8 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_MISC;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
-#if ERTS_USE_ASYNC_READY_Q
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
-#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
@@ -596,8 +588,6 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
- valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
-#endif
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
@@ -605,6 +595,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+ valid |= ERTS_SSI_AUX_WORK_YIELD;
if (~valid & value)
erts_exit(ERTS_ABORT_EXIT,
@@ -618,16 +609,13 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
#endif
-#ifdef ERTS_SMP
-static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int
-erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
+erts_lc_runq_is_locked(ErtsRunQueue *runq)
{
- return erts_smp_lc_mtx_is_locked(&runq->mtx);
+ return erts_lc_mtx_is_locked(&runq->mtx);
}
#endif
@@ -635,13 +623,13 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
static ERTS_INLINE Uint64
ensure_later_proc_interval(Uint64 interval)
{
- return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
+ return erts_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
}
Uint64
erts_get_proc_interval(void)
{
- return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc));
+ return erts_current_interval_nob(erts_ptab_interval(&erts_proc));
}
Uint64
@@ -653,15 +641,13 @@ erts_ensure_later_proc_interval(Uint64 interval)
Uint64
erts_step_proc_interval(void)
{
- return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc));
+ return erts_step_interval_nob(erts_ptab_interval(&erts_proc));
}
void
erts_pre_init_process(void)
{
-#ifdef USE_THREADS
erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
-#endif
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX]
= "DELAYED_AW_WAKEUP";
@@ -687,12 +673,12 @@ erts_pre_init_process(void)
= "MISC_THR_PRGR";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
= "MISC";
- erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX]
- = "PENDING_EXITERS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
= "MSEG_CACHE_CHECK";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_YIELD_IX]
+ = "YIELD";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX]
= "REAP_PORTS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX]
@@ -729,6 +715,26 @@ erts_pre_init_process(void)
= ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
= ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].set_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].get_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
+ = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].get_locks
+ = ERTS_PSD_PENDING_SUSPEND_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].set_locks
+ = ERTS_PSD_PENDING_SUSPEND_SET_LOCKS;
#endif
}
@@ -743,10 +749,7 @@ void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
-#ifdef ERTS_SMP
- erts_disable_proc_not_running_opt = 0;
erts_init_proc_lock(ncpu);
-#endif
init_proclist_alloc();
@@ -758,11 +761,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
sizeof(Process),
"process_table",
legacy_proc_tab,
-#ifdef ERTS_SMP
1
-#else
- 0
-#endif
);
last_reductions = 0;
@@ -774,7 +773,9 @@ erts_late_init_process(void)
{
int ix;
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ erts_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
Eterm atom;
char *atom_str;
@@ -809,15 +810,26 @@ erts_late_init_process(void)
}
+#define ERTS_SCHED_WTIME_IDLE ~((Uint64) 0)
+
static void
-init_sched_wall_time(ErtsSchedWallTime *swtp)
+init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
{
- swtp->need = erts_sched_balance_util;
- swtp->enabled = 0;
- swtp->start = 0;
- swtp->working.total = 0;
- swtp->working.start = 0;
- swtp->working.currently = 0;
+ if (esdp->type != ERTS_SCHED_NORMAL) {
+ erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0);
+ esdp->sched_wall_time.enabled = 1;
+ esdp->sched_wall_time.start = time_stamp;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+ }
+ else
+ {
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
+ esdp->sched_wall_time.enabled = 0;
+ esdp->sched_wall_time.start = 0;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = 0;
+ }
}
static ERTS_INLINE Uint64
@@ -959,14 +971,14 @@ erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval)
if (!locked) {
if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) {
/* Writer will eventually block on runq-lock */
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
locked = 1;
}
}
}
if (!initially_locked && locked)
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
now = sched_wall_time_ts();
worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime);
@@ -1008,31 +1020,139 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-static ERTS_INLINE void
+
+typedef struct {
+ Uint64 working;
+ Uint64 total;
+} ErtsDirtySchedWallTime;
+
+static void
+read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info)
+{
+ erts_aint32_t mod1;
+ Uint64 working, start, ts;
+
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+
+ while (1) {
+ erts_aint32_t mod2;
+
+        /* Spin while a write is in progress (odd 'mod' value)... */
+ while (1) {
+ if ((mod1 & 1) == 0)
+ break;
+ ERTS_SPIN_BODY;
+ mod1 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ }
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ working = esdp->sched_wall_time.working.total;
+ start = esdp->sched_wall_time.working.start;
+
+ ERTS_THR_READ_MEMORY_BARRIER;
+
+ mod2 = erts_atomic32_read_nob(&esdp->sched_wall_time.u.mod);
+ if (mod1 == mod2)
+ break;
+ mod1 = mod2;
+ }
+
+ ts = sched_wall_time_ts();
+ ts -= esdp->sched_wall_time.start;
+
+ info->total = ts;
+
+ if (start == ERTS_SCHED_WTIME_IDLE || ts < start)
+ info->working = working;
+ else
+ info->working = working + (ts - start);
+
+ if (info->working > info->total)
+ info->working = info->total;
+}
+
+
+
+static void
+dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
+{
+ erts_aint32_t mod;
+ Uint64 ts = sched_wall_time_ts();
+
+ ts -= esdp->sched_wall_time.start;
+
+ /*
+     * This thread is the only thread writing to
+     * this sched_wall_time struct. We set 'mod' to
+ * an odd value while writing...
+ */
+ mod = erts_atomic32_read_dirty(&esdp->sched_wall_time.u.mod);
+ ASSERT((mod & 1) == 0);
+ mod++;
+
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+
+ if (working) {
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
+
+ esdp->sched_wall_time.working.start = ts;
+
+ }
+ else {
+ Uint64 total;
+
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
+
+ total = esdp->sched_wall_time.working.total;
+ total += ts - esdp->sched_wall_time.working.start;
+
+ esdp->sched_wall_time.working.total = total;
+ esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
+
+
+ }
+
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ mod++;
+ erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod);
+
+ if (!working) {
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_OTHER);
+ }
+}
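The reader/writer pair above forms a single-writer seqlock: the writer makes 'mod' odd before touching the counters and even again afterwards, while a reader retries until it observes the same even value on both sides of its reads. A minimal standalone sketch of the same protocol in C11 atomics (hypothetical names, not the emulator's types; the fences mirror the explicit ERTS_THR_*_MEMORY_BARRIER usage above):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        atomic_uint mod;        /* even = stable, odd = write in progress */
        uint64_t working;
        uint64_t start;
    } seq_counters;

    /* Single writer: bump 'mod' to odd, write, bump back to even. */
    static void seq_write(seq_counters *sc, uint64_t working, uint64_t start)
    {
        unsigned m = atomic_load_explicit(&sc->mod, memory_order_relaxed);
        atomic_store_explicit(&sc->mod, m + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        sc->working = working;
        sc->start = start;
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&sc->mod, m + 2, memory_order_relaxed);
    }

    /* Readers: spin past in-progress writes, retry if one intervened. */
    static void seq_read(seq_counters *sc, uint64_t *working, uint64_t *start)
    {
        unsigned m1, m2;
        do {
            do {
                m1 = atomic_load_explicit(&sc->mod, memory_order_relaxed);
            } while (m1 & 1);
            atomic_thread_fence(memory_order_acquire);
            *working = sc->working;
            *start = sc->start;
            atomic_thread_fence(memory_order_acquire);
            m2 = atomic_load_explicit(&sc->mod, memory_order_relaxed);
        } while (m1 != m2);
    }
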
+
+
+static void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
{
- if (esdp->sched_wall_time.need) {
+ if (esdp->sched_wall_time.u.need) {
Uint64 ts = sched_wall_time_ts();
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
- update_avg_sched_util(esdp, ts, working);
+ update_avg_sched_util(esdp, ts, working);
#endif
if (esdp->sched_wall_time.enabled) {
if (working) {
-#ifdef DEBUG
- ASSERT(!esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 1;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ == ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
esdp->sched_wall_time.working.start = ts;
}
else {
-#ifdef DEBUG
- ASSERT(esdp->sched_wall_time.working.currently);
- esdp->sched_wall_time.working.currently = 0;
-#endif
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
ts -= esdp->sched_wall_time.working.start;
esdp->sched_wall_time.working.total += ts;
+#ifdef DEBUG
+ esdp->sched_wall_time.working.start
+ = ERTS_SCHED_WTIME_IDLE;
+#endif
}
}
}
@@ -1049,17 +1169,19 @@ typedef struct {
int enable;
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
+ int want_dirty_cpu;
+ int want_dirty_io;
} ErtsSchedWallTimeReq;
typedef struct {
Process *proc;
Eterm ref;
- Eterm ref_heap[REF_THING_SIZE];
+ Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsSystemCheckReq;
@@ -1073,6 +1195,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(screq,
5,
ERTS_ALC_T_SYS_CHECK_REQ)
+
static void
reply_sched_wall_time(void *vswtrp)
{
@@ -1090,29 +1213,27 @@ reply_sched_wall_time(void *vswtrp)
ErlOffHeap *ohp = NULL;
ErtsMessage *mp = NULL;
- ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
if (swtrp->set) {
if (!swtrp->enable && esdp->sched_wall_time.enabled) {
- esdp->sched_wall_time.need = erts_sched_balance_util;
+ esdp->sched_wall_time.u.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
}
else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- esdp->sched_wall_time.need = 1;
+ esdp->sched_wall_time.u.need = 1;
esdp->sched_wall_time.enabled = 1;
esdp->sched_wall_time.start = ts;
esdp->sched_wall_time.working.total = 0;
esdp->sched_wall_time.working.start = 0;
- esdp->sched_wall_time.working.currently = 1;
}
}
if (esdp->sched_wall_time.enabled) {
Uint64 ts = sched_wall_time_ts();
- ASSERT(esdp->sched_wall_time.working.currently);
+ ASSERT(esdp->sched_wall_time.working.start
+ != ERTS_SCHED_WTIME_IDLE);
ts -= esdp->sched_wall_time.start;
total = ts;
ts -= esdp->sched_wall_time.working.start;
@@ -1123,30 +1244,115 @@ reply_sched_wall_time(void *vswtrp)
hpp = NULL;
szp = &sz;
- while (1) {
- if (hpp)
- ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
- else
- *szp += REF_THING_SIZE;
+ if (esdp->sched_wall_time.enabled
+ && swtrp->req_sched == esdp->no
+ && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
+ /* Reply with info about this scheduler and all dirty schedulers... */
+ ErtsDirtySchedWallTime *dswt;
+ int ix, no_dirty_scheds, want_dcpu, want_dio, soffset;
+
+ want_dcpu = swtrp->want_dirty_cpu;
+ want_dio = swtrp->want_dirty_io;
+
+ no_dirty_scheds = 0;
+ if (want_dcpu)
+ no_dirty_scheds += erts_no_dirty_cpu_schedulers;
+ if (want_dio)
+ no_dirty_scheds += erts_no_dirty_io_schedulers;
+
+ ASSERT(no_dirty_scheds);
+
+ dswt = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDirtySchedWallTime)
+ * no_dirty_scheds);
+
+ for (ix = 0; ix < no_dirty_scheds; ix++) {
+ ErtsSchedulerData *esdp;
+ if (want_dcpu && ix < erts_no_dirty_cpu_schedulers)
+ esdp = &erts_aligned_dirty_cpu_scheduler_data[ix].esd;
+ else {
+ int dio_ix = ix - erts_no_dirty_cpu_schedulers;
+ esdp = &erts_aligned_dirty_io_scheduler_data[dio_ix].esd;
+ }
+ read_dirty_sched_wall_time(esdp, &dswt[ix]);
+ }
- if (swtrp->set)
- msg = ref_copy;
- else {
- msg = (!esdp->sched_wall_time.enabled
- ? am_notsup
- : erts_bld_tuple(hpp, szp, 3,
- make_small(esdp->no),
- erts_bld_uint64(hpp, szp, working),
- erts_bld_uint64(hpp, szp, total)));
+ soffset = erts_no_schedulers + 1;
- msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
- }
- if (hpp)
- break;
+ if (!want_dcpu) {
+ ASSERT(want_dio);
+ soffset += erts_no_dirty_cpu_schedulers;
+ }
- mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
- szp = NULL;
- hpp = &hp;
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ ASSERT(!swtrp->set);
+
+ /* info about dirty schedulers... */
+ msg = NIL;
+ for (ix = no_dirty_scheds-1; ix >= 0; ix--) {
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(ix+soffset),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].working),
+ erts_bld_uint64(hpp, szp,
+ dswt[ix].total)),
+ msg);
+ }
+ /* info about this scheduler... */
+ msg = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)),
+ msg);
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, dswt);
+ }
+ else
+ {
+ /* Reply with info about this scheduler only... */
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += ERTS_REF_THING_SIZE;
+
+ if (swtrp->set)
+ msg = ref_copy;
+ else {
+ msg = (!esdp->sched_wall_time.enabled
+ ? am_undefined
+ : erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ szp = NULL;
+ hpp = &hp;
+ }
}
erts_queue_message(rp, rp_locks, mp, msg, am_system);
@@ -1155,27 +1361,27 @@ reply_sched_wall_time(void *vswtrp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&swtrp->refc) == 0)
swtreq_free(vswtrp);
}
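The message-building loops above follow the ERTS two-pass erts_bld_* convention: the first iteration runs with hpp == NULL and only accumulates the required heap size in *szp; the heap is then allocated, szp is cleared, and the second iteration builds the term for real. A condensed sketch of the driver loop, where bld_pair is a hypothetical stand-in for the erts_bld_* helpers:

    #include <stdlib.h>

    typedef long Term;                       /* stand-in for Eterm */

    /* Hypothetical builder: sizes when szp != NULL, builds when hpp != NULL. */
    static Term bld_pair(Term **hpp, unsigned *szp, Term a, Term b)
    {
        if (szp)
            *szp += 3;                       /* header word + two elements */
        if (hpp) {
            Term *hp = *hpp;
            *hpp += 3;
            hp[0] = 2; hp[1] = a; hp[2] = b; /* fake tuple layout */
            return (Term) hp;
        }
        return 0;
    }

    static Term build_msg(Term a, Term b)
    {
        unsigned sz = 0, *szp = &sz;
        Term *hp = NULL, **hpp = NULL, msg;

        while (1) {
            msg = bld_pair(hpp, szp, a, b);  /* pass 1 sizes, pass 2 builds */
            if (hpp)
                break;
            hp = malloc(sz * sizeof(Term));  /* stands in for the message heap */
            szp = NULL;
            hpp = &hp;
        }
        return msg;
    }
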
Eterm
-erts_sched_wall_time_request(Process *c_p, int set, int enable)
+erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int want_dirty_cpu, int want_dirty_io)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSchedWallTimeReq *swtrp;
Eterm *hp;
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
if (!set && !esdp->sched_wall_time.enabled)
return THE_NON_VALUE;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
swtrp = swtreq_alloc();
ref = erts_make_ref(c_p);
@@ -1186,18 +1392,18 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
swtrp->proc = c_p;
swtrp->ref = STORE_NC(&hp, NULL, ref);
swtrp->req_sched = esdp->no;
- erts_smp_atomic32_init_nob(&swtrp->refc,
+ swtrp->want_dirty_cpu = want_dirty_cpu;
+ swtrp->want_dirty_io = want_dirty_io;
+ erts_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_sched_wall_time,
(void *) swtrp);
-#endif
reply_sched_wall_time((void *) swtrp);
@@ -1218,12 +1424,9 @@ reply_system_check(void *vscrp)
ErlOffHeap *ohp = NULL;
ErtsMessage *mp = NULL;
- ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
- sz = REF_THING_SIZE;
+ sz = ERTS_REF_THING_SIZE;
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
@@ -1234,11 +1437,11 @@ reply_system_check(void *vscrp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
erts_proc_dec_refc(rp);
- if (erts_smp_atomic32_dec_read_nob(&scrp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&scrp->refc) == 0)
screq_free(vscrp);
}
@@ -1256,17 +1459,15 @@ Eterm erts_system_check_request(Process *c_p) {
scrp->proc = c_p;
scrp->ref = STORE_NC(&hp, NULL, ref);
scrp->req_sched = esdp->no;
- erts_smp_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers);
+ erts_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers);
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_system_check,
(void *) scrp);
-#endif
reply_system_check((void *) scrp);
@@ -1299,15 +1500,15 @@ proclist_destroy(ErtsProcList *plp)
}
ErtsProcList *
-erts_proclist_copy(ErtsProcList *plp)
+erts_proclist_create(Process *p)
{
- return proclist_copy(plp);
+ return proclist_create(p);
}
ErtsProcList *
-erts_proclist_create(Process *p)
+erts_proclist_copy(ErtsProcList *plp)
{
- return proclist_create(p);
+ return proclist_copy(plp);
}
void
@@ -1316,6 +1517,20 @@ erts_proclist_destroy(ErtsProcList *plp)
proclist_destroy(plp);
}
+void
+erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList *plp)
+{
+ ErtsProcList *first = plp;
+
+ while (plp) {
+ erts_print(to, to_arg, "%T", plp->pid);
+ plp = plp->next;
+ if (plp == first)
+ break;
+ }
+ erts_print(to, to_arg, "\n");
+}
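The early break on plp == first works because proclists are circularly linked; the traversal shape in isolation (hypothetical node type):

    struct node { struct node *next; int pid; };

    /* Visit each element of a circular singly-linked list exactly once. */
    static void dump_all(struct node *plp, void (*print_pid)(int))
    {
        struct node *first = plp;
        while (plp) {
            print_pid(plp->pid);
            plp = plp->next;
            if (plp == first)
                break;              /* wrapped around to the start */
        }
    }
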
+
void *
erts_psd_set_init(Process *p, int ix, void *data)
{
@@ -1327,7 +1542,7 @@ erts_psd_set_init(Process *p, int ix, void *data)
for (i = 0; i < ERTS_PSD_SIZE; i++)
new_psd->data[i] = NULL;
- psd = (ErtsPSD *) erts_smp_atomic_cmpxchg_mb(&p->psd,
+ psd = (ErtsPSD *) erts_atomic_cmpxchg_mb(&p->psd,
(erts_aint_t) new_psd,
(erts_aint_t) NULL);
if (psd)
@@ -1339,21 +1554,21 @@ erts_psd_set_init(Process *p, int ix, void *data)
return old;
}
-#ifdef ERTS_SMP
void
-erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
break;
case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
/*
* Thread progress blocking while poll sleeping; need
* to signal on both...
*/
- erts_sys_schedule_interrupt(1);
+ erts_check_io_interrupt(ssi->psi, 1);
/* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
@@ -1367,7 +1582,6 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
}
}
-#endif
static ERTS_INLINE void
set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
@@ -1383,11 +1597,7 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
if ((old_flgs & flgs) != flgs) {
-#ifdef ERTS_SMP
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
}
@@ -1403,11 +1613,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
if ((old_flgs & flgs) != flgs) {
-#ifdef ERTS_SMP
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
@@ -1423,7 +1629,6 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
@@ -1443,7 +1648,7 @@ haw_thr_prgr_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
}
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1453,7 +1658,7 @@ haw_thr_prgr_soft_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1467,7 +1672,7 @@ haw_thr_prgr_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val,
else {
awdp->latest_wakeup = val;
awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
}
@@ -1493,9 +1698,9 @@ static ERTS_INLINE void
haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
{
ErtsThrPrgrVal current = awdp->current_thr_prgr;
-#ifdef ERTS_DIRTY_SCHEDULERS
+
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (current != ERTS_THR_PRGR_INVALID
&& !erts_thr_progress_equal(current, erts_thr_progress_current())) {
/*
@@ -1512,9 +1717,7 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in
{
int jix, max_jix;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
@@ -1572,7 +1775,6 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp,
}
}
-#endif
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
@@ -1613,11 +1815,7 @@ init_misc_aux_work(void)
sizeof(erts_algnd_misc_aux_work_q_t)
* (erts_no_schedulers+1));
-#ifdef ERTS_SMP
ix = 0; /* aux_thread + schedulers */
-#else
- ix = 1; /* scheduler only */
-#endif
for (; ix <= erts_no_schedulers; ix++) {
qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
@@ -1635,10 +1833,8 @@ misc_aux_work_clean(ErtsThrQ_t *q,
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
return aux_work | ERTS_SSI_AUX_WORK_MISC;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q));
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
@@ -1664,16 +1860,14 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp,
return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
erts_aint32_t aux_work,
int waiting)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
@@ -1685,7 +1879,6 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
}
-#endif
static ERTS_INLINE void
schedule_misc_aux_work(int sched_id,
@@ -1695,11 +1888,7 @@ schedule_misc_aux_work(int sched_id,
ErtsThrQ_t *q;
erts_misc_aux_work_t *mawp;
-#ifdef ERTS_SMP
ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
-#else
- ASSERT(sched_id == 1);
-#endif
q = &misc_aux_work_queues[sched_id].q;
mawp = misc_aux_work_alloc();
@@ -1725,12 +1914,13 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
int id, self = 0;
if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- if (esdp)
- self = (int) esdp->no;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* ignore_self is meaningless on dirty schedulers since aux work can
+ * only run on normal schedulers, and their ids do not translate. */
+ if(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ self = (int)esdp->no;
+ }
}
ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
@@ -1742,7 +1932,6 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
}
}
-#if ERTS_USE_ASYNC_READY_Q
void
erts_notify_check_async_ready_queue(void *vno)
@@ -1758,9 +1947,9 @@ handle_async_ready(ErtsAuxWorkData *awdp,
int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
-#ifdef ERTS_DIRTY_SCHEDULERS
+
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
if (erts_check_async_ready(awdp->async_ready.queue)) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
@@ -1770,9 +1959,7 @@ handle_async_ready(ErtsAuxWorkData *awdp,
}
return aux_work;
}
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
-#endif
set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
| ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
@@ -1785,10 +1972,8 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
{
void *thr_prgr_p;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
-#ifdef ERTS_SMP
+
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->async_ready.thr_prgr)) {
@@ -1797,26 +1982,20 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
awdp->async_ready.need_thr_prgr = 0;
thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
-#else
- thr_prgr_p = NULL;
-#endif
switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
case ERTS_ASYNC_READY_CLEAN:
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#ifdef ERTS_SMP
case ERTS_ASYNC_READY_NEED_THR_PRGR:
haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr);
awdp->async_ready.need_thr_prgr = 1;
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
default:
return aux_work;
}
}
-#endif /* ERTS_USE_ASYNC_READY_Q */
static ERTS_INLINE erts_aint32_t
@@ -1825,9 +2004,8 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
@@ -1841,7 +2019,6 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
return aux_work;
}
-#ifdef ERTS_SMP
void
erts_alloc_notify_delayed_dealloc(int ix)
@@ -1875,9 +2052,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
ERTS_MSACC_PUSH_STATE_M_X();
-#ifdef ERTS_DIRTY_SCHEDULERS
+
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
@@ -1914,9 +2091,8 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1973,9 +2149,8 @@ handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
erts_handle_canceled_timers((void *) awdp->esdp,
&need_thr_progress,
@@ -2009,9 +2184,8 @@ handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr))
return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
@@ -2054,11 +2228,11 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
int lops;
ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
+
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -2085,7 +2259,7 @@ enqueue_later_op(ErtsSchedulerData *esdp,
ErtsThrPrgrLaterOp *lop)
{
ErtsThrPrgrVal later = erts_thr_progress_later(esdp);
- ASSERT(esdp);
+ ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
lop->func = later_func;
lop->data = later_data;
@@ -2101,20 +2275,15 @@ enqueue_later_op(ErtsSchedulerData *esdp,
return later;
}
-#endif /* ERTS_SMP */
void
erts_schedule_thr_prgr_later_op(void (*later_func)(void *),
void *later_data,
ErtsThrPrgrLaterOp *lop)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_wakeup(&esdp->aux_work_data, later);
-#endif
}
void
@@ -2123,13 +2292,9 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *),
ErtsThrPrgrLaterOp *lop,
UWord size)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size);
-#endif
}
static ERTS_INLINE erts_aint32_t
@@ -2138,9 +2303,7 @@ handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int w
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t saved_aux_work, flags;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#endif
flags = awdp->debug.wait_completed.flags;
@@ -2181,11 +2344,7 @@ setup_thr_debug_wait_completed(void *vproc)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsAuxWorkData *awdp;
erts_aint32_t wait_flags, aux_work_flags;
-#ifdef ERTS_SMP
awdp = esdp ? &esdp->aux_work_data : aux_thread_aux_work_data;
-#else
- awdp = &esdp->aux_work_data;
-#endif
wait_flags = 0;
aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
@@ -2194,18 +2353,14 @@ setup_thr_debug_wait_completed(void *vproc)
erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
wait_flags |= (ERTS_SSI_AUX_WORK_DD
| ERTS_SSI_AUX_WORK_DD_THR_PRGR);
-#ifdef ERTS_SMP
aux_work_flags |= ERTS_SSI_AUX_WORK_DD;
-#endif
}
if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) {
wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS
| ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
-#ifdef ERTS_SMP
if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp))
aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
-#endif
}
set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags);
@@ -2224,21 +2379,17 @@ static void later_thr_debug_wait_completed(void *vlop)
{
struct debug_lop *lop = vlop;
erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
-#ifdef ERTS_SMP
count += 1; /* aux thread */
-#endif
if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) {
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
setup_thr_debug_wait_completed,
lop->proc);
-#ifdef ERTS_SMP
/* aux_thread */
erts_schedule_misc_aux_work(0,
setup_thr_debug_wait_completed,
lop->proc);
-#endif
}
erts_free(ERTS_ALC_T_DEBUG, lop);
}
@@ -2259,9 +2410,7 @@ erts_debug_wait_completed(Process *c_p, int flags)
{
/* Only one process at a time can do this */
erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers);
-#ifdef ERTS_SMP
count += 1; /* aux thread */
-#endif
if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count,
count,
0)) {
@@ -2290,17 +2439,18 @@ notify_reap_ports_relb(void)
}
}
-erts_smp_atomic32_t erts_halt_progress;
+erts_atomic32_t erts_halt_progress;
int erts_halt_code;
static ERTS_INLINE erts_aint32_t
handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
- awdp->esdp->run_queue->halt_in_progress = 1;
- if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
+ ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING);
+
+ if (erts_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
int i, max = erts_ptab_max(&erts_port);
- erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
+ erts_atomic32_set_nob(&erts_halt_progress, 1);
for (i = 0; i < max; i++) {
erts_aint32_t state;
Port *prt = erts_pix2port(i);
@@ -2313,63 +2463,84 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
/* We need to set the halt flag - get the port lock */
- erts_smp_port_lock(prt);
+ erts_port_lock(prt);
state = erts_atomic32_read_nob(&prt->state);
if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
| ERTS_PORT_SFLG_HALT))) {
state = erts_atomic32_read_bor_relb(&prt->state,
ERTS_PORT_SFLG_HALT);
- erts_smp_atomic32_inc_nob(&erts_halt_progress);
+ erts_atomic32_inc_nob(&erts_halt_progress);
if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING)))
erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
}
erts_port_release(prt);
}
- if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
+ if (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
erts_flush_async_exit(erts_halt_code, "");
}
}
return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS;
}
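handle_reap_ports coordinates the halt with a count-to-zero protocol: erts_halt_progress is reset to 1, incremented once per port that still has to flush, and whichever decrement takes the count back to zero runs the final exit step. The same shape in isolation (hypothetical names; in the real code the final step is erts_flush_async_exit):

    #include <stdatomic.h>

    static atomic_int halt_progress;

    static void final_exit(void) { /* erts_flush_async_exit(...) here */ }

    static void begin_halt(void)   { atomic_store(&halt_progress, 1); }
    static void port_pending(void) { atomic_fetch_add(&halt_progress, 1); }

    static void port_done(void)
    {
        /* fetch_sub returns the previous value, so 1 means we just
         * dropped the count to zero and own the final step. */
        if (atomic_fetch_sub(&halt_progress, 1) == 1)
            final_exit();
    }
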
-#if HAVE_ERTS_MSEG
+void
+erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp)
+{
+ ASSERT(esdp == erts_get_scheduler_data());
+ /* Always called by the scheduler itself... */
+ set_aux_work_flags_wakeup_nob(esdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+}
static ERTS_INLINE erts_aint32_t
-handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
- erts_mseg_cache_check();
- return aux_work & ~ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
-}
+ int yield = 0;
+ /*
+ * Yield operations are always requested by the scheduler itself.
+ *
+ * The following handlers should *not* set the ERTS_SSI_AUX_WORK_YIELD
+ * flag in order to indicate more work. They should instead return
+ * information so this "main handler" can manipulate the flag...
+ *
+ * The following handlers should be able to handle being called
+ * even though no work is to be done...
+ */
-#endif
+ /* Various yielding operations... */
-#ifdef ERTS_SMP
+ yield |= erts_handle_yielded_ets_all_request(awdp->esdp,
+ &awdp->yield.ets_all);
+ yield |= erts_handle_yielded_alcu_blockscan(awdp->esdp,
+ &awdp->yield.alcu_blockscan);
-static ERTS_INLINE erts_aint32_t
-handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
-{
- ErtsProcList *pnd_xtrs;
- ErtsRunQueue *rq;
+    /*
+     * Other yielding operations...
+     */
+
+ if (!yield) {
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+ return aux_work & ~ERTS_SSI_AUX_WORK_YIELD;
+ }
- rq = awdp->esdp->run_queue;
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
+ return aux_work;
+}
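handle_yield above aggregates exactly as its comment prescribes: each sub-handler returns nonzero when it needs another round and never touches the flag word itself. A stripped-down sketch (stub handlers and the bit value are hypothetical):

    enum { AUX_WORK_YIELD = 1u << 5 };      /* hypothetical bit */

    static int yielded_op_a_continue(void) { return 0; }   /* stub */
    static int yielded_op_b_continue(void) { return 0; }   /* stub */

    static unsigned handle_yield_sketch(unsigned *ssi_flags, unsigned aux_work)
    {
        int yield = 0;

        yield |= yielded_op_a_continue();
        yield |= yielded_op_b_continue();

        if (!yield) {
            *ssi_flags &= ~AUX_WORK_YIELD;  /* only the aggregator clears it */
            return aux_work & ~AUX_WORK_YIELD;
        }
        return aux_work;                    /* still set; we get called again */
    }
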
- erts_smp_runq_lock(rq);
- pnd_xtrs = rq->procs.pending_exiters;
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- if (erts_proclist_fetch(&pnd_xtrs, NULL))
- do_handle_pending_exiters(pnd_xtrs);
+#if HAVE_ERTS_MSEG
- return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
+static ERTS_INLINE erts_aint32_t
+handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
+ erts_mseg_cache_check();
+ return aux_work & ~ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
}
#endif
+
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
@@ -2400,9 +2571,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX);
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#ifdef ERTS_SMP
haw_thr_prgr_current_reset(awdp);
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
ASSERT(aux_work);
@@ -2421,7 +2590,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
* Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
     * to each other. Most frequent first.
*/
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP,
handle_delayed_aux_work_wakeup);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
@@ -2429,13 +2597,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
/* DD must be before DD_THR_PRGR */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR,
handle_delayed_dealloc_thr_prgr);
-#endif
HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
handle_fix_alloc);
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
handle_thr_prgr_later_op);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS,
@@ -2443,29 +2609,19 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
/* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR,
handle_canceled_timers_thr_prgr);
-#endif
-#if ERTS_USE_ASYNC_READY_Q
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
handle_async_ready);
/* ASYNC_READY must be before ASYNC_READY_CLEAN */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN,
handle_async_ready_clean);
-#endif
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR,
handle_misc_aux_work_thr_prgr);
-#endif
/* MISC_THR_PRGR must be before MISC */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
-#ifdef ERTS_SMP
- HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
- handle_pending_exiters);
-#endif
-
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -2474,6 +2630,9 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
handle_mseg_cache_check);
#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_YIELD,
+ handle_yield);
+
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
@@ -2487,10 +2646,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
-#ifdef ERTS_SMP
if (waiting && !aux_work)
haw_thr_prgr_current_check_progress(awdp);
-#endif
ERTS_MSACC_UPDATE_CACHE();
ERTS_MSACC_POP_STATE_M();
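Each HANDLE_AUX_WORK(FLG, handler) line above dispatches on a flag bit and replaces aux_work with whatever the handler reports as still pending, which is why the frequency-ordering comment above matters. A hypothetical expansion conveying just the shape (the real macro also does debug checks and accounting):

    /* Hypothetical sketch of the dispatch shape, not the real macro body. */
    #define HANDLE_AUX_WORK_SKETCH(flg, handler)                \
        do {                                                    \
            if (aux_work & (flg)) {                             \
                aux_work = handler(awdp, aux_work, waiting);    \
                if (!aux_work)                                  \
                    goto done;  /* nothing pending; stop */     \
            }                                                   \
        } while (0)
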
@@ -2508,6 +2665,9 @@ typedef struct {
int initialized;
erts_atomic32_t refc;
+#ifdef DEBUG
+ erts_atomic32_t used;
+#endif
erts_atomic32_t type[1];
} ErtsAuxWorkTmo;
@@ -2517,6 +2677,13 @@ static ERTS_INLINE void
start_aux_work_timer(ErtsSchedulerData *esdp)
{
ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+#ifdef DEBUG
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used,
+ (erts_aint32_t) esdp->no);
+ ASSERT(esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(!no);
+#endif
+
tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
erts_twheel_init_timer(&aux_work_tmo->timer.data);
@@ -2524,7 +2691,6 @@ start_aux_work_timer(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&aux_work_tmo->timer.data,
aux_work_timeout,
- NULL,
(void *) esdp,
tmo);
}
@@ -2553,16 +2719,19 @@ aux_work_timeout_early_init(int no_schedulers)
aux_work_tmo = (ErtsAuxWorkTmo *) p;
aux_work_tmo->initialized = 0;
erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+#ifdef DEBUG
+ erts_atomic32_init_nob(&aux_work_tmo->used, 0);
+#endif
for (i = 0; i <= no_schedulers; i++)
erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
}
void
-aux_work_timeout_late_init(void)
+erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp)
{
aux_work_tmo->initialized = 1;
- if (erts_atomic32_read_nob(&aux_work_tmo->refc))
- start_aux_work_timer(erts_get_scheduler_data());
+ if (erts_atomic32_read_acqb(&aux_work_tmo->refc))
+ start_aux_work_timer(esdp);
}
static void
@@ -2570,12 +2739,15 @@ aux_work_timeout(void *vesdp)
{
erts_aint32_t refc;
int i;
-#ifdef ERTS_SMP
- i = 0;
-#else
- i = 1;
+#ifdef DEBUG
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used, 0);
+ ASSERT(no == esdp->no);
+ ASSERT(esdp == (ErtsSchedulerData *) vesdp);
#endif
+ i = 0;
+
for (; i <= erts_no_schedulers; i++) {
erts_aint32_t type;
type = erts_atomic32_read_acqb(&aux_work_tmo->type[i]);
@@ -2608,9 +2780,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
{
erts_aint32_t old, refc;
-#ifndef ERTS_SMP
- ix = 1;
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(type);
ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
@@ -2637,104 +2806,20 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
return old;
}
-
-
-static ERTS_INLINE void
-sched_waiting_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- ASSERT(rq->waiting >= 0);
- (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- rq->waiting++;
- rq->waiting *= -1;
- rq->woken = 0;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_inactive);
-}
-
-static ERTS_INLINE void
-sched_active_sys(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
- rq->waiting--;
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(no), am_active);
-}
-
Uint
erts_active_schedulers(void)
{
Uint as = erts_no_schedulers;
- ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
+ ERTS_ATOMIC_FOREACH_NORMAL_RUNQ(rq, as -= abs(rq->waiting));
return as;
}
-#ifdef ERTS_SMP
-
-static ERTS_INLINE void
-clear_sys_scheduling(void)
-{
- erts_smp_atomic32_set_mb(&doing_sys_schedule, 0);
-}
-
-static ERTS_INLINE int
-try_set_sys_scheduling(void)
-{
- return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
-}
-
-#endif
-
-static ERTS_INLINE int
-prepare_for_sys_schedule(int non_blocking)
-{
- if (non_blocking && erts_eager_check_io) {
-#ifdef ERTS_SMP
- return try_set_sys_scheduling();
-#else
- return 1;
-#endif
- }
- else {
-#ifdef ERTS_SMP
- while (!erts_port_task_have_outstanding_io_tasks()
- && try_set_sys_scheduling()) {
- if (!erts_port_task_have_outstanding_io_tasks())
- return 1;
- clear_sys_scheduling();
- }
- return 0;
-#else
- return !erts_port_task_have_outstanding_io_tasks();
-#endif
- }
-}
-
-#ifdef ERTS_SMP
-
-static ERTS_INLINE void
-sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
- ASSERT(rq->waiting < 0);
- rq->waiting *= -1;
-}
-
static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
if (rq->waiting < 0)
@@ -2749,7 +2834,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq)
static ERTS_INLINE void
sched_active(Uint no, ErtsRunQueue *rq)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
if (rq->waiting < 0)
rq->waiting++;
else
@@ -2763,7 +2848,7 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
{
if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
+ erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
@@ -2771,9 +2856,9 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags)
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
if (!erts_runq_supervision_interval)
- erts_smp_atomic32_inc_relb(&no_empty_run_queues);
+ erts_atomic32_inc_relb(&no_empty_run_queues);
else {
- erts_smp_atomic32_inc_mb(&no_empty_run_queues);
+ erts_atomic32_inc_mb(&no_empty_run_queues);
if (erts_atomic_read_nob(&runq_supervisor_sleeping))
ethr_event_set(&runq_supervision_event);
}
@@ -2803,7 +2888,7 @@ non_empty_runq(ErtsRunQueue *rq)
Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY);
if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
+ erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
@@ -2811,10 +2896,10 @@ non_empty_runq(ErtsRunQueue *rq)
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
if (!erts_runq_supervision_interval)
- erts_smp_atomic32_dec_relb(&no_empty_run_queues);
+ erts_atomic32_dec_relb(&no_empty_run_queues);
else {
erts_aint32_t no;
- no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues);
+ no = erts_atomic32_dec_read_mb(&no_empty_run_queues);
if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping))
ethr_event_set(&runq_supervision_event);
}
@@ -2837,12 +2922,13 @@ static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t nflgs;
erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC);
+ nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -2859,11 +2945,11 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
} while (oflgs & ERTS_SSI_FLG_WAITING);
return oflgs;
}
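sched_prep_spin_wait and sched_prep_cont_spin_wait above share one compare-exchange retry shape: propose new flags computed from the last observed value, carry over bits that must survive (SUSPENDED, MSB_EXEC), and retry from whatever the exchange actually saw. Roughly the same loop in C11 atomics (hypothetical flag names):

    #include <stdatomic.h>

    enum { FLG_SLEEPING = 1, FLG_WAITING = 2, FLG_SUSPENDED = 4 };

    static int prep_spin_wait(atomic_int *flags)
    {
        int xflgs = 0;

        for (;;) {
            /* Carry over bits that must survive the transition. */
            int nflgs = (xflgs & FLG_SUSPENDED) | FLG_SLEEPING | FLG_WAITING;
            int expected = xflgs;

            if (atomic_compare_exchange_strong(flags, &expected, nflgs))
                return nflgs;            /* transition installed */
            if (!(expected & FLG_WAITING))
                return expected;         /* state changed under us; give up */
            xflgs = expected;            /* retry from the observed value */
        }
    }
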
@@ -2876,7 +2962,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
!= (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
break;
@@ -2901,11 +2987,11 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
erts_tse_reset(ssi->event);
else {
ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
- erts_sys_schedule_interrupt(0);
+ erts_check_io_interrupt(ssi->psi, 0);
}
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -2913,7 +2999,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
return oflgs;
}
xflgs = oflgs;
- nflgs |= oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs |= oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
}
}
@@ -2932,7 +3018,7 @@ static void
thr_prgr_prep_wait(void *vssi)
{
ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
- erts_smp_atomic32_read_bor_acqb(&ssi->flags,
+ erts_atomic32_read_bor_acqb(&ssi->flags,
ERTS_SSI_FLG_SLEEPING);
}
@@ -2947,7 +3033,7 @@ thr_prgr_wait(void *vssi)
while (1) {
erts_aint32_t aflgs, nflgs;
nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING;
- aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ aflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (aflgs == xflgs) {
erts_tse_wait(ssi->event);
break;
@@ -2962,7 +3048,7 @@ static void
thr_prgr_fin_wait(void *vssi)
{
ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
- erts_smp_atomic32_read_band_nob(&ssi->flags,
+ erts_atomic32_read_band_nob(&ssi->flags,
~(ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING));
}
@@ -2970,9 +3056,8 @@ thr_prgr_fin_wait(void *vssi)
static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
void
-erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time)
+erts_aux_thread_poke(void)
{
- /* TODO only poke when needed (based on timeout_time) */
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
}
@@ -2983,7 +3068,9 @@ aux_thread(void *unused)
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
+ ErtsThrPrgrData *tpd;
int thr_prgr_active = 1;
+ ERTS_MSACC_DECLARE_CACHE();
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -2992,6 +3079,7 @@ aux_thread(void *unused)
}
#endif
+ erts_port_task_pre_alloc_init_thread();
ssi->event = erts_tse_fetch();
erts_msacc_init_thread("aux", 1, 1);
@@ -3002,45 +3090,78 @@ aux_thread(void *unused)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+#if ERTS_POLL_USE_FALLBACK
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ssi->psi = erts_create_pollset_thread(-2, tpd);
+#else
+ ssi->psi = erts_create_pollset_thread(-1, tpd);
+#endif
+#endif
sched_prep_spin_wait(ssi);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
while (1) {
erts_aint32_t flgs;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
+
+#ifdef ERTS_BREAK_REQUESTED
+ if (ERTS_BREAK_REQUESTED)
+ erts_do_break_handling();
+#endif
+
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
+
+#if ERTS_POLL_USE_FALLBACK
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT);
+ }
+ }
+#else
+ erts_thr_progress_prepare_wait(tpd);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
+ int res;
ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
- }
- erts_thr_progress_finalize_wait(NULL);
+ }
+ erts_thr_progress_finalize_wait(tpd);
+#endif
}
flgs = sched_prep_spin_wait(ssi);
@@ -3048,383 +3169,323 @@ aux_thread(void *unused)
return NULL;
}
-#endif /* ERTS_SMP */
-
-static void
-scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
+static void *
+poll_thread(void *arg)
{
- int working = 1;
- ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- int spincount;
- erts_aint32_t aux_work = 0;
-#ifdef ERTS_SMP
+ int id = (int)(UWord)arg;
+ ErtsAuxWorkData *awdp = poll_thread_aux_work_data+id;
+ ErtsSchedulerSleepInfo *ssi = ERTS_POLL_THREAD_SLEEP_INFO_IX(id);
+ erts_aint32_t aux_work;
+ ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
- erts_aint32_t flgs;
-#endif
- ERTS_MSACC_PUSH_STATE_M();
-#ifdef ERTS_SMP
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_smp_spin_lock(&rq->sleepers.lock);
-#endif
- flgs = sched_prep_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- /* Go suspend instead... */
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_smp_spin_unlock(&rq->sleepers.lock);
-#endif
- return;
- }
+ struct erts_poll_thread *psi;
+ ErtsThrPrgrData *tpd;
+ ERTS_MSACC_DECLARE_CACHE();
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_smp_spin_unlock(&rq->sleepers.lock);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[] = "poll_thread";
+ erts_lc_set_thread_name(buf);
}
#endif
- /*
- * If all schedulers are waiting, one of them *should*
- * be waiting in erl_sys_schedule()
- */
-
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(0)) {
+ erts_port_task_pre_alloc_init_thread();
+ ssi->event = erts_tse_fetch();
- sched_waiting(esdp->no, rq);
+ erts_msacc_init_thread("poll", id, 0);
- erts_smp_runq_unlock(rq);
+ callbacks.arg = (void *) ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
- spincount = sched_busy_wait.tse;
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ init_aux_work_data(awdp, NULL, NULL);
+ awdp->ssi = ssi;
- tse_wait:
+ psi = erts_create_pollset_thread(id, tpd);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && thr_prgr_active != working)
- sched_wall_time_change(esdp, thr_prgr_active);
+ ssi->psi = psi;
- while (1) {
- ErtsMonotonicTime current_time = 0;
+ sched_prep_spin_wait(ssi);
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- }
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
- }
- }
- else {
- ErtsMonotonicTime timeout_time;
- int do_timeout = 0;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- timeout_time = erts_check_next_timeout_time(esdp);
- current_time = erts_get_monotonic_time(esdp);
- do_timeout = (current_time >= timeout_time);
- } else {
- current_time = 0;
- timeout_time = ERTS_MONOTONIC_TIME_MAX;
- }
- if (do_timeout) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- }
- else {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
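+    /* Main loop (descriptive summary of the code below): handle aux work
+     * while marked active for thread progress; when there is none, mark
+     * this thread inactive and sleep inside erts_check_io(). */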
+ while (1) {
+ erts_aint32_t flgs;
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_get_monotonic_time(esdp);
- do {
- Sint64 timeout;
- if (current_time >= timeout_time)
- break;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
- - current_time
- - 1) + 1;
- } else
- timeout = -1;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
- res = erts_tse_twait(ssi->event, timeout);
- ERTS_MSACC_POP_STATE_M();
- current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
- erts_get_monotonic_time(esdp);
- } while (res == EINTR);
- }
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
- }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
+ aux_work = handle_aux_work(awdp, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
+ }
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
- }
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
- flgs = sched_prep_cont_spin_wait(ssi);
- spincount = sched_busy_wait.aux_work;
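+        /* sched_prep_spin_wait() set SLEEPING|WAITING on this ssi; if no
+         * wakeup has cleared them yet, commit to POLL_SLEEPING and block
+         * in erts_check_io() with an infinite timeout. */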
+ flgs = sched_spin_wait(ssi, 0);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- break;
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(psi, ERTS_POLL_INF_TIMEOUT);
+ }
}
-
- }
-
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
}
- erts_smp_runq_lock(rq);
- sched_active(esdp->no, rq);
-
+ flgs = sched_prep_spin_wait(ssi);
}
- else
-#endif
- {
-
- erts_smp_atomic32_set_relb(&function_calls, 0);
- *fcalls = 0;
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- sched_waiting_sys(esdp->no, rq);
-
- erts_smp_runq_unlock(rq);
-
- ASSERT(working);
- sched_wall_time_change(esdp, working = 0);
+ return NULL;
+}
- spincount = sched_busy_wait.sys_schedule;
- if (spincount == 0)
- goto sys_aux_work;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static ERTS_INLINE void
+clear_sys_scheduling(void)
+{
+ erts_atomic32_set_mb(&doing_sys_schedule, 0);
+}
- while (spincount-- > 0) {
- ErtsMonotonicTime current_time;
+static ERTS_INLINE int
+try_set_sys_scheduling(void)
+{
+ return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
+}
- sys_poll_aux_work:
- if (working)
- sched_wall_time_change(esdp, working = 0);
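+/* Claim the I/O polling right: take the doing_sys_schedule flag with a
+ * cmpxchg, then re-check for outstanding I/O tasks so that a task queued
+ * between the first check and the claim is not missed; on that race the
+ * claim is released and the loop retries. Returns 1 when the claim is
+ * held, 0 otherwise. */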
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
+}
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+static void
+check_io_timer(void *null)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (prepare_for_sys_schedule()) {
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ clear_sys_scheduling();
+ }
+
+    /* The timer is cleared if this scheduler's run-queue became empty
+       or if the CHECKIO flag was cleared. The CHECKIO flag is cleared
+       when check_balance assigns another scheduler to be the poller in
+       the overload scenario. */
+ if ((ERTS_RUNQ_FLGS_GET_NOB(esdp->run_queue) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
+ == ERTS_RUNQ_FLG_CHECKIO) {
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT,
+ check_io_timer, NULL);
+ } else {
+ ERTS_RUNQ_FLGS_UNSET(esdp->run_queue, ERTS_RUNQ_FLG_CHECKIO);
+ }
+}
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- LTTNG2(scheduler_poll, esdp->no, 1);
- erl_sys_schedule(1); /* Might give us something to do */
+#else
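+/* Scheduler polling disabled: the claim always fails, so normal
+ * schedulers never sleep in erts_check_io(). */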
+#define clear_sys_scheduling()
+#define prepare_for_sys_schedule() 0
+#endif
- ERTS_MSACC_POP_STATE_M();
+static void
+scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
+{
+ int working = 1;
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ int spincount;
+ erts_aint32_t aux_work = 0;
+ int thr_prgr_active = 1;
+ erts_aint32_t flgs;
+ ERTS_MSACC_PUSH_STATE();
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- sys_aux_work:
-#ifndef ERTS_SMP
- erts_sys_schedule_interrupt(0);
-#endif
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ erts_spin_lock(&rq->sleepers.lock);
+ flgs = sched_prep_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_SUSPENDED) {
+ /* Go suspend instead... */
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ erts_spin_unlock(&rq->sleepers.lock);
+ return;
+ }
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!working)
- sched_wall_time_change(esdp, working = 1);
-#ifdef ERTS_SMP
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
-#endif
- aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- ERTS_MSACC_UPDATE_CACHE();
-#ifdef ERTS_SMP
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
-#endif
- }
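+    /* Dirty schedulers park on the run-queue's sleeper list so that
+     * wake_dirty_schedulers() can find and wake them. */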
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ ssi->prev = NULL;
+ ssi->next = rq->sleepers.list;
+ if (rq->sleepers.list)
+ rq->sleepers.list->prev = ssi;
+ rq->sleepers.list = ssi;
+ erts_spin_unlock(&rq->sleepers.lock);
+ dirty_active(esdp, -1);
+ }
-#ifndef ERTS_SMP
- if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start)
- goto sys_woken;
-#else
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
+ sched_waiting(esdp->no, rq);
- /*
- * If we got new I/O tasks we aren't allowed to
- * call erl_sys_schedule() until it is handled.
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to continue checking for I/O...
- */
- if (!prepare_for_sys_schedule(0)) {
- spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
- goto tse_wait;
- }
- }
-#endif
- }
+ erts_runq_unlock(rq);
- erts_smp_runq_lock(rq);
+ spincount = sched_get_busy_wait_params(esdp)->tse;
-#ifdef ERTS_SMP
- /*
- * If we got new I/O tasks we aren't allowed to
- * sleep in erl_sys_schedule().
- */
- if (erts_port_task_have_outstanding_io_tasks()) {
- clear_sys_scheduling();
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 0);
+ else if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, working = thr_prgr_active);
- /*
- * Got to check that we still got I/O tasks; otherwise
- * we have to wait in erl_sys_schedule() after all...
- */
- if (!prepare_for_sys_schedule(0)) {
- /*
- * Not allowed to wait in erl_sys_schedule;
- * do tse wait instead...
- */
- sched_change_waiting_sys_to_waiting(esdp->no, rq);
- erts_smp_runq_unlock(rq);
- spincount = 0;
- goto tse_wait;
- }
- }
-#endif
- if (aux_work) {
- erts_smp_runq_unlock(rq);
- goto sys_poll_aux_work;
- }
-#ifdef ERTS_SMP
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_locked_woken;
- }
- erts_smp_runq_unlock(rq);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- goto sys_poll_aux_work;
- }
+ while (1) {
+ ErtsMonotonicTime current_time = 0;
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-#endif
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
+ }
- erts_smp_runq_unlock(rq);
+ if (aux_work) {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
+ }
+ else {
+ ErtsMonotonicTime timeout_time;
+ int do_timeout = 0;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout_time = erts_check_next_timeout_time(esdp);
+ current_time = erts_get_monotonic_time(esdp);
+ do_timeout = (current_time >= timeout_time);
+ } else {
+ current_time = 0;
+ timeout_time = ERTS_MONOTONIC_TIME_MAX;
+ }
+ if (do_timeout) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ }
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && prepare_for_sys_schedule()) {
+            /* We sleep in check_io; only normal schedulers do this */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ flgs = sched_spin_wait(ssi, 0);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi, timeout_time);
+ current_time = erts_get_monotonic_time(esdp);
+ }
+ }
+ clear_sys_scheduling();
+ } else {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(esdp));
+ }
+ flgs = sched_spin_wait(ssi, spincount);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ do {
+ Sint64 timeout;
+ if (current_time >= timeout_time)
+ break;
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+ - current_time
+ - 1) + 1;
+ } else
+ timeout = -1;
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
+ res = erts_tse_twait(ssi->event, timeout);
+ ERTS_MSACC_POP_STATE();
+ current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+ erts_get_monotonic_time(esdp);
+ } while (res == EINTR);
+ }
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(esdp));
+ }
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
- if (working)
- sched_wall_time_change(esdp, working = 0);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
-#ifdef ERTS_SMP
- if (thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
-#endif
+ flgs = sched_prep_cont_spin_wait(ssi);
+ spincount = sched_get_busy_wait_params(esdp)->aux_work;
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ break;
+ }
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
- LTTNG2(scheduler_poll, esdp->no, 0);
+ }
- erl_sys_schedule(0);
+ if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC))
+ erts_atomic32_read_band_nob(&ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC));
- ERTS_MSACC_POP_STATE_M();
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_sched_wall_time_change(esdp, working = 1);
+ else if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
+ erts_runq_lock(rq);
+ sched_active(esdp->no, rq);
-#ifndef ERTS_SMP
- if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start)
- goto sys_aux_work;
- sys_woken:
-#else
- flgs = sched_prep_cont_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_WAITING)
- goto sys_aux_work;
-
- sys_woken:
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_smp_runq_lock(rq);
- sys_locked_woken:
- if (!thr_prgr_active) {
- erts_smp_runq_unlock(rq);
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- erts_smp_runq_lock(rq);
- }
- clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
-#endif
- if (!working)
- sched_wall_time_change(esdp, working = 1);
- sched_active_sys(esdp->no, rq);
- }
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ dirty_active(esdp, 1);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
@@ -3434,20 +3495,55 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t nflgs = 0;
erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
- nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
+ nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC);
xflgs = oflgs;
}
}
+static ERTS_INLINE void
+ssi_wake(ErtsSchedulerSleepInfo *ssi)
+{
+ erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi));
+}
+
+
static void
-wake_scheduler(ErtsRunQueue *rq)
+dcpu_sched_ix_suspend_wake(Uint ix)
{
- ErtsSchedulerSleepInfo *ssi;
- erts_aint32_t flgs;
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
+ erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+
+static void
+dio_sched_ix_suspend_wake(Uint ix)
+{
+ ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
+ erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ ssi_wake(ssi);
+}
+static void
+dcpu_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+#if 0
+static void
+dio_sched_ix_wake(Uint ix)
+{
+ ssi_wake(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+#endif
+
+
+static void
+wake_scheduler(ErtsRunQueue *rq)
+{
/*
* The unlocked run queue is not strictly necessary
* from a thread safety or deadlock prevention
@@ -3456,16 +3552,12 @@ wake_scheduler(ErtsRunQueue *rq)
* so all code *should* handle this without having
* the lock on the run queue.
*/
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq)
|| ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- ssi = rq->scheduler->ssi;
-
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
+ ssi_wake(rq->scheduler->ssi);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
wake_dirty_schedulers(ErtsRunQueue *rq, int one)
{
@@ -3475,10 +3567,10 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
sl = &rq->sleepers;
- erts_smp_spin_lock(&sl->lock);
+ erts_spin_lock(&sl->lock);
ssi = sl->list;
if (!ssi) {
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
if (one)
wake_scheduler(rq);
} else if (one) {
@@ -3492,14 +3584,14 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
if (ssi->next)
ssi->next->prev = ssi->prev;
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
flgs = ssi_flags_set_wake(ssi);
erts_sched_finish_poke(ssi, flgs);
} else {
sl->list = NULL;
- erts_smp_spin_unlock(&sl->lock);
+ erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
do {
@@ -3509,7 +3601,13 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
} while (ssi);
}
}
-#endif
+
+static void
+wake_dirty_scheduler(ErtsRunQueue *rq)
+{
+ wake_dirty_schedulers(rq, 1);
+}
+
#define ERTS_NO_USED_RUNQS_SHIFT 16
#define ERTS_NO_RUNQS_MASK 0xffffU
@@ -3523,13 +3621,13 @@ init_no_runqs(int active, int used)
{
erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
- erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
+ erts_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
}
static ERTS_INLINE void
get_no_runqs(int *active, int *used)
{
- erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t no_runqs = erts_atomic32_read_nob(&balance_info.no_runqs);
if (active)
*active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
if (used)
@@ -3539,12 +3637,12 @@ get_no_runqs(int *active, int *used)
static ERTS_INLINE void
set_no_used_runqs(int used)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT;
new |= exp & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -3554,14 +3652,14 @@ set_no_used_runqs(int used)
static ERTS_INLINE void
set_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
if ((exp & ERTS_NO_RUNQS_MASK) == active)
break;
new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
new |= active & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -3571,14 +3669,14 @@ set_no_active_runqs(int active)
static ERTS_INLINE int
try_inc_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs);
if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
return 0;
if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
erts_aint32_t new, act;
new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
new |= active & ERTS_NO_RUNQS_MASK;
- act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
+ act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
return 1;
}
@@ -3640,25 +3738,20 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
}
}
-#endif /* ERTS_SMP */
static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
-#ifdef ERTS_SMP
if (runq) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
- wake_dirty_schedulers(runq, 1);
+ wake_dirty_scheduler(runq);
else
-#endif
wake_scheduler(runq);
}
-#endif
}
void
-erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+erts_notify_inc_runq(ErtsRunQueue *runq)
{
smp_notify_inc_runq(runq);
}
@@ -3666,16 +3759,12 @@ erts_smp_notify_inc_runq(ErtsRunQueue *runq)
void
erts_sched_notify_check_cpu_bind(void)
{
-#ifdef ERTS_SMP
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
wake_scheduler(rq);
}
-#else
- erts_sched_check_cpu_bind(erts_get_scheduler_data());
-#endif
}
@@ -3684,9 +3773,9 @@ enqueue_process(ErtsRunQueue *runq, int prio, Process *p)
{
ErtsRunPrioQueue *rpq;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
- erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio);
+ erts_inc_runq_len(runq, &runq->procs.prio_info[prio], prio);
if (prio == PRIORITY_LOW) {
p->schedule_count = RESCHEDULE_LOW;
@@ -3714,7 +3803,7 @@ unqueue_process(ErtsRunQueue *runq,
Process *prev_proc,
Process *proc)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
if (prev_proc)
prev_proc->next = proc->next;
@@ -3726,7 +3815,7 @@ unqueue_process(ErtsRunQueue *runq,
if (!rpq->first)
rpq->last = NULL;
- erts_smp_dec_runq_len(runq, rqi, prio);
+ erts_dec_runq_len(runq, rqi, prio);
}
@@ -3739,7 +3828,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
ErtsRunQueueInfo *rqi;
Process *p;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
ASSERT(PRIORITY_NORMAL == prio_q
|| PRIORITY_HIGH == prio_q
@@ -3750,9 +3839,11 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
if (!p)
return NULL;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+
+ state = erts_atomic32_read_nob(&p->state);
+ ASSERT(state & ERTS_PSFLG_IN_RUNQ);
- state = erts_smp_atomic32_read_nob(&p->state);
if (statep)
*statep = state;
@@ -3760,8 +3851,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
rqi = &runq->procs.prio_info[prio];
- if (p)
- unqueue_process(runq, rpq, rqi, prio, NULL, p);
+ unqueue_process(runq, rpq, rqi, prio, NULL, p);
return p;
}
@@ -3785,11 +3875,10 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q)
static ERTS_INLINE void
free_proxy_proc(Process *proxy)
{
- ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
erts_free(ERTS_ALC_T_PROC, proxy);
}
-#ifdef ERTS_SMP
static ErtsRunQueue *
check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
@@ -3842,7 +3931,7 @@ static void
immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
{
Uint32 iflags, iflag;
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
ASSERT(erts_thr_progress_is_managed_thread());
@@ -3883,26 +3972,21 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
rq = check_immigration_need(c_rq, mp, prio);
if (rq) {
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
if (prio == ERTS_PORT_PRIO_LEVEL) {
Port *prt;
prt = erts_dequeue_port(rq);
if (prt)
- RUNQ_SET_RQ(&prt->run_queue, c_rq);
- erts_smp_runq_unlock(rq);
+ erts_set_runq_port(prt, c_rq);
+ erts_runq_unlock(rq);
if (prt) {
- /* port might terminate while we have no lock... */
rq = erts_port_runq(prt);
- if (rq) {
- if (rq != c_rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s(): Internal error",
- __FILE__, __LINE__, __func__);
- erts_enqueue_port(c_rq, prt);
- if (!iflag)
- return; /* done */
- erts_smp_runq_unlock(c_rq);
- }
+ if (rq != c_rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ erts_enqueue_port(c_rq, prt);
+ if (!iflag)
+ return; /* done */
+ erts_runq_unlock(c_rq);
}
}
else {
@@ -3915,46 +3999,44 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
while (proc) {
erts_aint32_t state;
- state = erts_smp_atomic32_read_acqb(&proc->state);
- if (!(ERTS_PSFLG_BOUND & state)
- && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) {
+ state = erts_atomic32_read_acqb(&proc->state);
+ if (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ && erts_try_change_runq_proc(proc, c_rq)) {
ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
- erts_smp_runq_unlock(rq);
- RUNQ_SET_RQ(&proc->run_queue, c_rq);
+ erts_runq_unlock(rq);
rq_locked = 0;
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
enqueue_process(c_rq, prio, proc);
if (!iflag)
return; /* done */
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
break;
}
prev_proc = proc;
proc = proc->next;
}
if (rq_locked)
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
}
}
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
}
static ERTS_INLINE void
suspend_run_queue(ErtsRunQueue *rq)
{
- erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
+ erts_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
ERTS_SSI_FLG_SUSPENDED);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
wake_scheduler(rq);
}
-static void scheduler_ix_resume_wake(Uint ix);
-static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi);
+static void nrml_sched_ix_resume_wake(Uint ix);
static ERTS_INLINE void
resume_run_queue(ErtsRunQueue *rq)
@@ -3962,38 +4044,38 @@ resume_run_queue(ErtsRunQueue *rq)
int pix;
Uint32 oflgs;
- erts_smp_runq_lock(rq);
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ erts_runq_lock(rq);
oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq,
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
- | ERTS_RUNQ_FLG_SUSPENDED),
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC),
(ERTS_RUNQ_FLG_OUT_OF_WORK
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
- if (oflgs & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (oflgs & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
erts_aint32_t len;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- len = erts_smp_atomic32_read_dirty(&rq->procs.prio_info[pix].len);
+ len = erts_atomic32_read_dirty(&rq->procs.prio_info[pix].len);
rq->procs.prio_info[pix].max_len = len;
rq->procs.prio_info[pix].reds = 0;
}
- len = erts_smp_atomic32_read_dirty(&rq->ports.info.len);
+ len = erts_atomic32_read_dirty(&rq->ports.info.len);
rq->ports.info.max_len = len;
rq->ports.info.reds = 0;
- len = erts_smp_atomic32_read_dirty(&rq->len);
+ len = erts_atomic32_read_dirty(&rq->len);
rq->max_len = len;
}
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- scheduler_ix_resume_wake(rq->ix);
+ nrml_sched_ix_resume_wake(rq->ix);
}
typedef struct {
@@ -4006,18 +4088,17 @@ schedule_bound_processes(ErtsRunQueue *rq,
ErtsStuckBoundProcesses *sbpp)
{
Process *proc, *next;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
proc = sbpp->first;
while (proc) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
next = proc->next;
enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc);
proc = next;
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERTS_INLINE void
clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
@@ -4037,11 +4118,10 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
#else
(void)
#endif
- erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
ASSERT(old & qb);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
static void
@@ -4051,28 +4131,22 @@ evacuate_run_queue(ErtsRunQueue *rq,
int prio_q;
ErtsRunQueue *to_rq;
ErtsMigrationPaths *mps;
- ErtsMigrationPath *mp = NULL;
+ ErtsMigrationPath *mp;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- mps = erts_get_migration_paths_managed();
- mp = &mps->mpath[rq->ix];
- }
+ ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
+
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
/* Evacuate scheduled misc ops */
if (rq->misc.start) {
ErtsMiscOpList *start, *end;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->misc_evac_runq;
if (!to_rq)
return;
@@ -4081,9 +4155,10 @@ evacuate_run_queue(ErtsRunQueue *rq,
end = rq->misc.end;
rq->misc.start = NULL;
rq->misc.end = NULL;
- erts_smp_runq_unlock(rq);
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+ erts_runq_unlock(rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(to_rq);
if (to_rq->misc.end)
to_rq->misc.end->next = start;
else
@@ -4093,17 +4168,14 @@ evacuate_run_queue(ErtsRunQueue *rq,
non_empty_runq(to_rq);
- erts_smp_runq_unlock(to_rq);
+ erts_runq_unlock(to_rq);
smp_notify_inc_runq(to_rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(rq);
}
if (rq->ports.start) {
Port *prt;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#endif
to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
if (!to_rq)
return;
@@ -4113,22 +4185,14 @@ evacuate_run_queue(ErtsRunQueue *rq,
while (prt) {
ErtsRunQueue *prt_rq;
prt = erts_dequeue_port(rq);
- RUNQ_SET_RQ(&prt->run_queue, to_rq);
- erts_smp_runq_unlock(rq);
- /*
- * The port might terminate while
- * we have no lock on it...
- */
+ erts_set_runq_port(prt, to_rq);
+ erts_runq_unlock(rq);
prt_rq = erts_port_runq(prt);
- if (prt_rq) {
- if (prt_rq != to_rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s() internal error\n",
- __FILE__, __LINE__, __func__);
- erts_enqueue_port(to_rq, prt);
- erts_smp_runq_unlock(to_rq);
- }
- erts_smp_runq_lock(rq);
+ if (prt_rq != to_rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ erts_enqueue_port(to_rq, prt);
+ erts_runq_unlock(to_rq);
+ erts_runq_lock(rq);
prt = rq->ports.start;
}
smp_notify_inc_runq(to_rq);
@@ -4138,31 +4202,23 @@ evacuate_run_queue(ErtsRunQueue *rq,
for (prio_q = 0; prio_q < ERTS_NO_PROC_PRIO_QUEUES; prio_q++) {
erts_aint32_t state;
Process *proc;
- int notify = 0;
- to_rq = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
-#endif
- {
- if (!mp->prio[prio_q].runq)
- return;
- if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
- return;
- }
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
Process *real_proc;
int prio;
- erts_aint32_t max_qbit, qbit, real_state;
+ erts_aint32_t max_qbit, qbit;
prio = ERTS_PSFLGS_GET_PRQ_PRIO(state);
qbit = ((erts_aint32_t) 1) << prio;
if (!(state & ERTS_PSFLG_PROXY)) {
real_proc = proc;
- real_state = state;
}
else {
real_proc = erts_proc_lookup_raw(proc->common.id);
@@ -4170,7 +4226,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
free_proxy_proc(proc);
goto handle_next_proc;
}
- real_state = erts_smp_atomic32_read_acqb(&real_proc->state);
}
max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET);
@@ -4184,33 +4239,29 @@ evacuate_run_queue(ErtsRunQueue *rq,
free_proxy_proc(proc);
else {
erts_aint32_t clr_bits;
-#ifdef DEBUG
- erts_aint32_t old;
-#endif
clr_bits = ERTS_PSFLG_IN_RUNQ;
clr_bits |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
-#ifdef DEBUG
- old =
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~clr_bits);
- ASSERT((old & clr_bits) == clr_bits);
+ state = erts_atomic32_read_band_mb(&proc->state, ~clr_bits);
+ ASSERT((state & clr_bits) == clr_bits);
+ if (state & ERTS_PSFLG_FREE) {
+ /* free and not queued by proxy */
+ erts_proc_dec_refc(proc);
+ }
}
goto handle_next_proc;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- clear_proc_dirty_queue_bit(real_proc, rq, qbit);
-#endif
+ prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
+ to_rq = mp->prio[prio].runq;
+
+ if (!to_rq)
+ goto handle_next_proc;
- if (ERTS_PSFLG_BOUND & real_state) {
+ if (!erts_try_change_runq_proc(proc, to_rq)) {
/* Bound processes get stuck here... */
proc->next = NULL;
if (sbpp->last)
@@ -4220,34 +4271,21 @@ evacuate_run_queue(ErtsRunQueue *rq,
sbpp->last = proc;
}
else {
- int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
- erts_smp_runq_unlock(rq);
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- /*
- * dirty run queues evacuate only to run
- * queue 0 during multi-scheduling blocking
- */
- to_rq = ERTS_RUNQ_IX(0);
- else
-#endif
- to_rq = mp->prio[prio].runq;
- RUNQ_SET_RQ(&proc->run_queue, to_rq);
+ erts_runq_unlock(rq);
- erts_smp_runq_lock(to_rq);
+ erts_runq_lock(to_rq);
enqueue_process(to_rq, prio, proc);
- erts_smp_runq_unlock(to_rq);
- notify = 1;
+ erts_runq_unlock(to_rq);
+
+ smp_notify_inc_runq(to_rq);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
handle_next_proc:
proc = dequeue_process(rq, prio_q, &state);
}
- if (notify)
- smp_notify_inc_runq(to_rq);
+
}
}
@@ -4259,15 +4297,15 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
ErtsRunPrioQueue *rpq;
if (*rq_lockedp) {
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
*rq_lockedp = 0;
}
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq));
- erts_smp_runq_lock(vrq);
+ erts_runq_lock(vrq);
- if (rq->halt_in_progress)
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING)
goto no_procs;
/*
@@ -4301,16 +4339,15 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
proc = rpq->first;
while (proc) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
- if (!(ERTS_PSFLG_BOUND & state)) {
+ if (erts_try_change_runq_proc(proc, rq)) {
+ erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
/* Steal process */
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
- erts_smp_runq_unlock(vrq);
- RUNQ_SET_RQ(&proc->run_queue, rq);
+ erts_runq_unlock(vrq);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
*rq_lockedp = 1;
enqueue_process(rq, prio, proc);
return !0;
@@ -4324,7 +4361,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
no_procs:
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(vrq));
/*
* Check for a runnable port to steal...
@@ -4333,29 +4370,17 @@ no_procs:
if (vrq->ports.start) {
ErtsRunQueue *prt_rq;
Port *prt = erts_dequeue_port(vrq);
- RUNQ_SET_RQ(&prt->run_queue, rq);
- erts_smp_runq_unlock(vrq);
-
- /*
- * The port might terminate while
- * we have no lock on it...
- */
-
+ erts_set_runq_port(prt, rq);
+ erts_runq_unlock(vrq);
prt_rq = erts_port_runq(prt);
- if (!prt_rq)
- return 0;
- else {
- if (prt_rq != rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s() internal error\n",
- __FILE__, __LINE__, __func__);
- *rq_lockedp = 1;
- erts_enqueue_port(rq, prt);
- return !0;
- }
+ if (prt_rq != rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ *rq_lockedp = 1;
+ erts_enqueue_port(rq, prt);
+ return !0;
}
- erts_smp_runq_unlock(vrq);
+ erts_runq_unlock(vrq);
return 0;
}
@@ -4366,8 +4391,7 @@ check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq);
- if ((flags & (ERTS_RUNQ_FLG_NONEMPTY
- | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY)
+    if (runq_got_work_to_execute_flags(flags) && !(flags & ERTS_RUNQ_FLG_PROTECTED))
return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags);
else
return 0;
@@ -4388,7 +4412,7 @@ try_steal_task(ErtsRunQueue *rq)
res = 0;
rq_locked = 1;
- ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
+ ERTS_LC_CHK_RUNQ_LOCK(rq, rq_locked);
get_no_runqs(&active_rqs, &blnc_rqs);
@@ -4401,7 +4425,7 @@ try_steal_task(ErtsRunQueue *rq)
if (active_rqs < blnc_rqs) {
int no = blnc_rqs - active_rqs;
int stop_ix = vix = active_rqs + rq->ix % no;
- while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
res = check_possible_steal_victim(rq, &rq_locked, vix);
if (res)
goto done;
@@ -4416,7 +4440,7 @@ try_steal_task(ErtsRunQueue *rq)
vix = rq->ix;
/* ... then try to steal a job from another active queue... */
- while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
vix++;
if (vix >= active_rqs)
vix = 0;
@@ -4433,13 +4457,11 @@ try_steal_task(ErtsRunQueue *rq)
done:
if (!rq_locked)
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
- if (!res)
- res = rq->halt_in_progress ?
- !ERTS_EMPTY_RUNQ_PORTS(rq) : !ERTS_EMPTY_RUNQ(rq);
-
- return res;
+ if (res)
+ return res;
+ return runq_got_work_to_execute(rq);
}
/* Run queue balancing */
@@ -4561,7 +4583,7 @@ alloc_mpaths(void)
{
void *block;
ErtsMigrationPaths *res;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx));
res = mpaths.freelist;
if (res) {
@@ -4584,7 +4606,7 @@ retire_mpaths(ErtsMigrationPaths *mps)
{
ErtsThrPrgrVal current;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx));
current = erts_thr_progress_current();
@@ -4630,7 +4652,7 @@ check_balance(ErtsRunQueue *c_rq)
int sched_util_balancing;
#endif
- if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
+ if (erts_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
@@ -4638,15 +4660,24 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(NULL, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
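+    /* With a single run-queue there is no balancing pass to drive
+       scheduler polling, so if the queue is neither out of work nor
+       already polling, start the check_io timer from here. */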
+ c_rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ if ((ERTS_RUNQ_FLGS_GET_NOB(c_rq) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
+ == 0) {
+ ERTS_RUNQ_FLGS_SET(c_rq, ERTS_RUNQ_FLG_CHECKIO);
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
+ }
+ ERTS_RUNQ_FLGS_UNSET(c_rq, ERTS_RUNQ_FLGS_MIGRATION_INFO);
+#endif
return;
}
- erts_smp_runq_unlock(c_rq);
+ erts_runq_unlock(c_rq);
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -4656,7 +4687,7 @@ check_balance(ErtsRunQueue *c_rq)
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
});
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
return;
}
@@ -4669,7 +4700,7 @@ check_balance(ErtsRunQueue *c_rq)
* is manipulated. Such updates of the migration information
* might clash with balancing.
*/
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
forced = balance_info.forced_check_balance;
balance_info.forced_check_balance = 0;
@@ -4677,10 +4708,10 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(&current_active, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(c_rq);
+ erts_mtx_unlock(&balance_info.update_mtx);
+ erts_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -4696,7 +4727,7 @@ check_balance(ErtsRunQueue *c_rq)
/* Read balance information for all run queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
@@ -4724,7 +4755,7 @@ check_balance(ErtsRunQueue *c_rq)
run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0);
#endif
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
full_scheds = 0;
@@ -4806,7 +4837,7 @@ check_balance(ErtsRunQueue *c_rq)
sched_util_balancing = 1;
/*
* In order to avoid renaming a large amount of fields
- * we write utilization values instead of lenght values
+ * we write utilization values instead of length values
* in the 'max_len' and 'migration_limit' fields...
*/
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -5158,15 +5189,30 @@ erts_fprintf(stderr, "--------------------------------\n");
/* Publish new migration paths... */
erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ if (full_scheds == current_active) {
+ ERTS_ASSERT(full_scheds <= current_active);
+        /* All active schedulers ran at full load; we need to do active
+           polling, so we set up a timer that performs it */
+ if (!(ERTS_RUNQ_FLGS_GET_NOB(c_rq) & ERTS_RUNQ_FLG_CHECKIO)) {
+ /* Active polling is not running, start it */
+ erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
+ }
+ run_queue_info[c_rq->ix].flags |= ERTS_RUNQ_FLG_CHECKIO;
+ }
+#endif
+
/* Reset balance statistics in all online queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
Uint32 flags = run_queue_info[qix].flags;
ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
if (rq->waiting)
flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
+ if (rq != c_rq)
+ flags &= ~ERTS_RUNQ_FLG_CHECKIO;
rq->full_reds_history_sum
= run_queue_info[qix].full_reds_history_sum;
@@ -5176,29 +5222,28 @@ erts_fprintf(stderr, "--------------------------------\n");
ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
rq->out_of_work_count = 0;
- (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
-
- rq->max_len = erts_smp_atomic32_read_dirty(&rq->len);
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO|ERTS_RUNQ_FLG_CHECKIO, flags);
+ rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
rqi = (pix == ERTS_PORT_PRIO_LEVEL
? &rq->ports.info
: &rq->procs.prio_info[pix]);
- erts_smp_reset_max_len(rq, rqi);
+ erts_reset_max_len(rq, rqi);
rqi->reds = 0;
}
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_set_nob(&balance_info.checking_balance, 0);
balance_info.n++;
retire_mpaths(old_mpaths);
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
}
static void
@@ -5206,7 +5251,7 @@ change_no_used_runqs(int used)
{
ErtsMigrationPaths *new_mpaths, *old_mpaths;
int qix;
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
set_no_used_runqs(used);
old_mpaths = erts_get_migration_paths_managed();
@@ -5253,28 +5298,23 @@ change_no_used_runqs(int used)
/* Make sure that we balance soon... */
balance_info.forced_check_balance = 1;
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
- erts_smp_runq_lock(ERTS_RUNQ_IX(0));
+ erts_runq_lock(ERTS_RUNQ_IX(0));
ERTS_RUNQ_IX(0)->check_balance_reds = 0;
- erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
+ erts_runq_unlock(ERTS_RUNQ_IX(0));
}
-#endif /* #ifdef ERTS_SMP */
Uint
erts_debug_nbalance(void)
{
-#ifdef ERTS_SMP
Uint n;
- erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_mtx_lock(&balance_info.update_mtx);
n = balance_info.n;
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_mtx_unlock(&balance_info.update_mtx);
return n;
-#else
- return 0;
-#endif
}
/* Wakeup other schedulers */
@@ -5320,26 +5360,36 @@ typedef enum {
#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10
#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10)
-#ifdef ERTS_SMP
-static struct {
+typedef struct {
ErtsSchedWakeupOtherThreshold threshold;
ErtsSchedWakeupOtherType type;
int limit;
int dec_shift;
int dec_mask;
void (*check)(ErtsRunQueue *rq, Uint32 flags);
-} wakeup_other;
+} ErtsWakeupOtherParams;
+
+static ErtsWakeupOtherParams sched_wakeup_other_params[ERTS_SCHED_TYPE_LAST + 1];
+
+static ERTS_INLINE ErtsWakeupOtherParams *
+runq_get_wakeup_other_params(ErtsRunQueue *rq)
+{
+ ErtsSchedulerData *esdp = rq->scheduler;
+ return &sched_wakeup_other_params[esdp->type];
+}
static void
wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
{
+ ErtsWakeupOtherParams *wo_params = runq_get_wakeup_other_params(rq);
int wo_reds = rq->wakeup_other_reds;
+
if (wo_reds) {
- int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1;
+ int left_len = erts_atomic32_read_dirty(&rq->len) - 1;
if (left_len < 1) {
- int wo_reduce = wo_reds << wakeup_other.dec_shift;
- wo_reduce &= wakeup_other.dec_mask;
+ int wo_reduce = wo_reds << wo_params->dec_shift;
+ wo_reduce &= wo_params->dec_mask;
rq->wakeup_other -= wo_reduce;
if (rq->wakeup_other < 0)
rq->wakeup_other = 0;
@@ -5347,17 +5397,15 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
else {
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
- if (rq->wakeup_other > wakeup_other.limit) {
-#ifdef ERTS_DIRTY_SCHEDULERS
+ if (rq->wakeup_other > wo_params->limit) {
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
if (rq->waiting) {
- wake_dirty_schedulers(rq, 1);
+ wake_dirty_scheduler(rq);
}
} else
-#endif
{
int empty_rqs =
- erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ erts_atomic32_read_acqb(&no_empty_run_queues);
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
if (empty_rqs != 0)
@@ -5371,56 +5419,58 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
}
static void
-wakeup_other_set_limit(void)
+wakeup_other_set_limit(ErtsWakeupOtherParams *params)
{
- switch (wakeup_other.threshold) {
+ switch (params->threshold) {
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH;
- wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH;
+ params->dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH;
- wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH;
+ params->dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
- wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
+ params->dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW;
- wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_LOW;
+ params->dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW;
- wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW;
+ params->dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW;
break;
}
- if (wakeup_other.dec_shift < 0)
- wakeup_other.dec_mask = (1 << (sizeof(wakeup_other.dec_mask)*8
- + wakeup_other.dec_shift)) - 1;
+
+ if (params->dec_shift < 0)
+ params->dec_mask = (1 << (sizeof(params->dec_mask)*8
+ + params->dec_shift)) - 1;
else {
- wakeup_other.dec_mask = 0;
- wakeup_other.dec_mask = ~wakeup_other.dec_mask;
+ params->dec_mask = 0;
+ params->dec_mask = ~params->dec_mask;
}
}
static void
wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
{
+ ErtsWakeupOtherParams *wo_params = runq_get_wakeup_other_params(rq);
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len);
+ erts_aint32_t len = erts_atomic32_read_dirty(&rq->len);
if (len < 2) {
rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
if (rq->wakeup_other < 0)
rq->wakeup_other = 0;
}
- else if (rq->wakeup_other < wakeup_other.limit)
+ else if (rq->wakeup_other < wo_params->limit)
rq->wakeup_other += len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY;
else {
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ if (erts_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
@@ -5431,23 +5481,23 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
}
static void
-wakeup_other_set_limit_legacy(void)
+wakeup_other_set_limit_legacy(ErtsWakeupOtherParams *params)
{
- switch (wakeup_other.threshold) {
+ switch (params->threshold) {
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY;
break;
case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW:
- wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY;
+ params->limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY;
break;
}
}
@@ -5455,15 +5505,21 @@ wakeup_other_set_limit_legacy(void)
static void
set_wakeup_other_data(void)
{
- switch (wakeup_other.type) {
- case ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT:
- wakeup_other.check = wakeup_other_check;
- wakeup_other_set_limit();
- break;
- case ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY:
- wakeup_other.check = wakeup_other_check_legacy;
- wakeup_other_set_limit_legacy();
- break;
+ ErtsSchedType type;
+
+ for (type = ERTS_SCHED_TYPE_FIRST; type <= ERTS_SCHED_TYPE_LAST; type++) {
+ ErtsWakeupOtherParams *params = &sched_wakeup_other_params[type];
+
+ switch (params->type) {
+ case ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT:
+ params->check = wakeup_other_check;
+ wakeup_other_set_limit(params);
+ break;
+ case ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY:
+ params->check = wakeup_other_check_legacy;
+ wakeup_other_set_limit_legacy(params);
+ break;
+ }
}
}
@@ -5471,7 +5527,7 @@ static int
no_runqs_to_supervise(void)
{
int used;
- erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ erts_aint32_t nerq = erts_atomic32_read_acqb(&no_empty_run_queues);
if (nerq <= 0)
return 0;
get_no_runqs(NULL, &used);
@@ -5504,89 +5560,78 @@ runq_supervisor(void *unused)
for (ix = 0; ix < no_rqs; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) {
- erts_smp_runq_lock(rq);
- if (erts_smp_atomic32_read_dirty(&rq->len) != 0)
+ erts_runq_lock(rq);
+ if (erts_atomic32_read_dirty(&rq->len) != 0)
wake_scheduler_on_empty_runq(rq); /* forced wakeup... */
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
}
}
return NULL;
}
-#endif
void
erts_early_init_scheduling(int no_schedulers)
{
+ ErtsSchedType type;
+
aux_work_timeout_early_init(no_schedulers);
-#ifdef ERTS_SMP
- wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
- wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
-#endif
- sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
- sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
- * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
- sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
- * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
+
+ for (type = ERTS_SCHED_TYPE_FIRST; type <= ERTS_SCHED_TYPE_LAST; type++) {
+ erts_sched_set_wakeup_other_threshold(type, "medium");
+ erts_sched_set_wakeup_other_type(type, "default");
+
+ erts_sched_set_busy_wait_threshold(type, "medium");
+ }
+
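+    /* Dirty schedulers override the medium default above with a short
+       busy-wait threshold. */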
+ erts_sched_set_busy_wait_threshold(ERTS_SCHED_DIRTY_CPU, "short");
+ erts_sched_set_busy_wait_threshold(ERTS_SCHED_DIRTY_IO, "short");
}
int
-erts_sched_set_wakeup_other_thresold(char *str)
-{
-#ifdef ERTS_SMP
- ErtsSchedWakeupOtherThreshold threshold;
- if (sys_strcmp(str, "very_high") == 0)
- threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH;
- else if (sys_strcmp(str, "high") == 0)
- threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH;
- else if (sys_strcmp(str, "medium") == 0)
- threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
- else if (sys_strcmp(str, "low") == 0)
- threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW;
- else if (sys_strcmp(str, "very_low") == 0)
- threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW;
- else
- return EINVAL;
- wakeup_other.threshold = threshold;
- set_wakeup_other_data();
+erts_sched_set_wakeup_other_threshold(ErtsSchedType sched_type, char *str)
+{
+ ErtsWakeupOtherParams *params = &sched_wakeup_other_params[sched_type];
+
+ if (sys_strcmp(str, "very_high") == 0) {
+ params->threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH;
+ } else if (sys_strcmp(str, "high") == 0) {
+ params->threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH;
+ } else if (sys_strcmp(str, "medium") == 0) {
+ params->threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
+ } else if (sys_strcmp(str, "low") == 0) {
+ params->threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW;
+ } else if (sys_strcmp(str, "very_low") == 0) {
+ params->threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW;
+ } else {
+ return EINVAL;
+ }
+
return 0;
-#else
- if (sys_strcmp(str, "very_high") == 0 || sys_strcmp(str, "high") == 0 ||
- sys_strcmp(str, "medium") == 0 || sys_strcmp(str, "low") == 0 ||
- sys_strcmp(str, "very_low") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
-erts_sched_set_wakeup_other_type(char *str)
+erts_sched_set_wakeup_other_type(ErtsSchedType sched_type, char *str)
{
-#ifdef ERTS_SMP
- ErtsSchedWakeupOtherType type;
- if (sys_strcmp(str, "default") == 0)
- type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
- else if (sys_strcmp(str, "legacy") == 0)
- type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY;
- else
- return EINVAL;
- wakeup_other.type = type;
+ ErtsWakeupOtherParams *params = &sched_wakeup_other_params[sched_type];
+
+ if (sys_strcmp(str, "default") == 0) {
+ params->type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
+ } else if (sys_strcmp(str, "legacy") == 0) {
+ params->type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY;
+ } else {
+ return EINVAL;
+ }
+
return 0;
-#else
- if (sys_strcmp(str, "default") == 0 || sys_strcmp(str, "legacy") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
-erts_sched_set_busy_wait_threshold(char *str)
+erts_sched_set_busy_wait_threshold(ErtsSchedType sched_type, char *str)
{
- int sys_sched;
- int aux_work_fact;
+ ErtsBusyWaitParams *params = &sched_busy_wait_params[sched_type];
+ int aux_work_fact, sys_sched;
if (sys_strcmp(str, "very_long") == 0) {
sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG;
@@ -5616,9 +5661,8 @@ erts_sched_set_busy_wait_threshold(char *str)
return EINVAL;
}
- sched_busy_wait.sys_schedule = sys_sched;
- sched_busy_wait.tse = sys_sched*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
- sched_busy_wait.aux_work = sys_sched*aux_work_fact;
+ params->tse = sys_sched * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
+ params->aux_work = sys_sched * aux_work_fact;
return 0;
}
@@ -5644,17 +5688,30 @@ erts_sched_set_wake_cleanup_threshold(char *str)
static void
init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
{
- if (!esdp)
- awdp->sched_id = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp);
-#endif
- else
- awdp->sched_id = (int) esdp->no;
+ int id = 0;
+ if (esdp) {
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ id = (int) esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ id = (int) erts_no_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ id = (int) erts_no_schedulers;
+ id += (int) erts_no_dirty_cpu_schedulers;
+ id += (int) esdp->dirty_no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ break;
+ }
+ }
+
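+    /* Flat id space: 0 when there is no scheduler, then normal
+       schedulers, then dirty CPU, then dirty I/O schedulers. */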
+ awdp->sched_id = id;
awdp->esdp = esdp;
awdp->ssi = esdp ? esdp->ssi : NULL;
-#ifdef ERTS_SMP
awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST;
awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
@@ -5663,15 +5720,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->later_op.size = 0;
awdp->later_op.first = NULL;
awdp->later_op.last = NULL;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
-#endif
awdp->async_ready.queue = NULL;
-#endif
-#ifdef ERTS_SMP
awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
if (!dawwp) {
awdp->delayed_wakeup.job = NULL;
@@ -5687,7 +5738,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
for (i = 0; i <= erts_no_schedulers; i++)
awdp->delayed_wakeup.sched2jix[i] = -1;
}
-#endif
awdp->debug.wait_completed.flags = 0;
awdp->debug.wait_completed.callback = NULL;
awdp->debug.wait_completed.arg = NULL;
@@ -5698,14 +5748,13 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
char** daww_ptr, size_t daww_sz,
- Process *shadow_proc)
+ Process *shadow_proc,
+ Uint64 time_stamp)
{
esdp->timer_wheel = NULL;
-#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
esdp->free_process = NULL;
-#endif
esdp->x_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
ERTS_X_REGS_ALLOCATED *
@@ -5713,40 +5762,51 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->f_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
-#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp->run_queue = runq;
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num;
+ if (runq == ERTS_DIRTY_CPU_RUNQ)
+ esdp->type = ERTS_SCHED_DIRTY_CPU;
+ else {
+ ASSERT(runq == ERTS_DIRTY_IO_RUNQ);
+ esdp->type = ERTS_SCHED_DIRTY_IO;
+ }
+ esdp->dirty_no = (Uint) num;
+ if (num == 1) {
+ /*
+ * Multi-scheduling block functionality depends
+ * on finding dirty scheduler number 1 here...
+ */
+ runq->scheduler = esdp;
+ }
}
else {
+ esdp->type = ERTS_SCHED_NORMAL;
esdp->no = (Uint) num;
- ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
+ esdp->dirty_no = 0;
+ runq->scheduler = esdp;
}
esdp->dirty_shadow_process = shadow_proc;
if (shadow_proc) {
erts_init_empty_process(shadow_proc);
- erts_smp_atomic32_init_nob(&shadow_proc->state,
+ erts_atomic32_init_nob(&shadow_proc->state,
(ERTS_PSFLG_ACTIVE
| ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_PROXY));
shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
}
-#else
- esdp->no = (Uint) num;
-#endif
+ ssi->esdp = esdp;
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
+ esdp->current_nif = NULL;
esdp->virtual_reds = 0;
esdp->cpu_id = -1;
erts_init_atom_cache_map(&esdp->atom_cache_map);
- esdp->run_queue = runq;
- esdp->run_queue->scheduler = esdp;
-
esdp->last_monotonic_time = 0;
esdp->check_time_reds = 0;
@@ -5756,41 +5816,39 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->io.out = (Uint64) 0;
esdp->io.in = (Uint64) 0;
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
-#ifdef ERTS_SMP
*daww_ptr += daww_sz;
-#endif
}
esdp->reductions = 0;
- init_sched_wall_time(&esdp->sched_wall_time);
+ init_sched_wall_time(esdp, time_stamp);
erts_port_task_handle_init(&esdp->nosuspend_port_task_handle);
}
void
-erts_init_scheduling(int no_schedulers, int no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
+erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_threads,
+ int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
int no_dirty_io_schedulers
-#endif
)
{
- int ix, n, no_ssi;
+ int ix, n, no_ssi, tot_rqs;
char *daww_ptr;
size_t daww_sz;
size_t size_runqs;
-#ifdef ERTS_SMP
erts_aint32_t set_schdlr_sspnd_change_flags;
-#endif
init_misc_op_list_alloc();
init_proc_sys_task_queues_alloc();
-#ifdef ERTS_SMP
set_wakeup_other_data();
-#endif
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
if (erts_sched_balance_util)
@@ -5800,36 +5858,29 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
ASSERT(no_dirty_cpu_schedulers >= 1);
ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
ASSERT(no_dirty_cpu_schedulers_online >= 1);
-#endif
+ ASSERT(erts_no_poll_threads == no_poll_threads);
/* Create and initialize run queues */
n = no_schedulers;
- size_runqs = sizeof(ErtsAlignedRunQueue) * (n + ERTS_NUM_DIRTY_RUNQS);
+ tot_rqs = (n + ERTS_NUM_DIRTY_RUNQS);
+ size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
-#ifdef ERTS_SMP
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_aligned_run_queues += ERTS_NUM_DIRTY_RUNQS;
-#endif
- erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic32_init_nob(&doing_sys_schedule, 0);
#endif
+ erts_atomic32_init_nob(&no_empty_run_queues, 0);
erts_no_run_queues = n;
- for (ix = -(ERTS_NUM_DIRTY_RUNQS); ix < n; ix++) {
+ for (ix = 0; ix < tot_rqs; ix++) {
int pix, rix;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsRunQueue *rq = ERTS_RUNQ_IX_IS_DIRTY(ix) ?
- ERTS_DIRTY_RUNQ_IX(ix) : ERTS_RUNQ_IX(ix);
-#else
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
-#endif
rq->ix = ix;
@@ -5837,16 +5888,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
 * id if the esdp->no <-> ix+1 mapping changes.
*/
- erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
- erts_smp_cnd_init(&rq->cnd);
+ erts_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ erts_cnd_init(&rq->cnd);
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix))
- erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
+ make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ }
rq->sleepers.list = NULL;
-#endif
-#endif
rq->waiting = 0;
rq->woken = 0;
@@ -5859,17 +5910,15 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
rq->out_of_work_count = 0;
rq->max_len = 0;
- erts_smp_atomic32_set_nob(&rq->len, 0);
+ erts_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- rq->halt_in_progress = 0;
- rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
rq->procs.reductions = 0;
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0);
+ erts_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0);
rq->procs.prio_info[pix].max_len = 0;
rq->procs.prio_info[pix].reds = 0;
if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) {
@@ -5881,7 +5930,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
rq->misc.start = NULL;
rq->misc.end = NULL;
- erts_smp_atomic32_init_nob(&rq->ports.info.len, 0);
+ erts_atomic32_init_nob(&rq->ports.info.len, 0);
rq->ports.info.max_len = 0;
rq->ports.info.reds = 0;
rq->ports.start = NULL;
@@ -5893,7 +5942,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
-#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS,
@@ -5904,49 +5952,42 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* erts_no_run_queues));
}
-#endif
n = (int) no_schedulers;
erts_no_schedulers = n;
-#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_no_total_schedulers = n;
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_total_schedulers += no_dirty_cpu_schedulers;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
-#endif
+ erts_no_total_schedulers += no_dirty_io_schedulers;
/* Create and initialize scheduler sleep info */
-#ifdef ERTS_SMP
- no_ssi = n+1;
-#else
- no_ssi = 1;
-#endif
+ no_ssi = n + 1 /* aux thread */;
aligned_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_ssi; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi;
-#ifdef ERTS_SMP
#if 0 /* no need to initialize these... */
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ ssi->esdp = NULL;
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
-#endif
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#ifdef ERTS_SMP
- aligned_sched_sleep_info++;
+ aligned_sched_sleep_info += 1 /* aux thread */;
-#ifdef ERTS_DIRTY_SCHEDULERS
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -5956,38 +5997,43 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
- erts_smp_atomic32_init_nob(&ssi->flags, 0);
+ erts_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#endif
-#endif
+
+ aligned_poll_thread_sleep_info =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_poll_threads*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_poll_threads; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_poll_thread_sleep_info[ix].ssi;
+ ssi->esdp = NULL;
+ erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->event = NULL; /* initialized in poll_thread */
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
+ }
/* Create and initialize scheduler specific data */
-#ifdef ERTS_SMP
daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
+ sizeof(int))*(n+1));
daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
daww_sz*n);
-#else
- daww_sz = 0;
- daww_ptr = NULL;
-#endif
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
- n*sizeof(ErtsAlignedSchedulerData));
+ n*sizeof(ErtsAlignedSchedulerData));
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
- NULL);
+ NULL, 0);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
{
+ Uint64 ts = sched_wall_time_ts();
int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
int adspix = 0;
ErtsAlignedDirtyShadowProcess *adsp =
@@ -6007,16 +6053,15 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_CPU_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
ERTS_DIRTY_IO_RUNQ, NULL, 0,
- &adsp[adspix++].dsp);
+ &adsp[adspix++].dsp, ts);
}
}
-#endif
init_misc_aux_work();
init_swtreq_alloc();
@@ -6025,19 +6070,22 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
debug_wait_completed_flags = 0;
-#ifdef ERTS_SMP
-
aux_thread_aux_work_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
sizeof(ErtsAuxWorkData));
+ poll_thread_aux_work_data =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ no_poll_threads * sizeof(ErtsAuxWorkData));
+
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
- erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0);
+ erts_atomic32_init_nob(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
@@ -6068,7 +6116,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU,
@@ -6085,7 +6132,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
- erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
}
}
@@ -6099,52 +6146,26 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ERTS_SCHED_DIRTY_IO,
no_dirty_io_schedulers);
-#endif
+ erts_atomic32_init_nob(&dirty_count.cpu.active,
+ (erts_aint32_t) no_dirty_cpu_schedulers);
+ erts_atomic32_init_nob(&dirty_count.io.active,
+ (erts_aint32_t) no_dirty_io_schedulers);
+
if (set_schdlr_sspnd_change_flags)
- erts_smp_atomic32_set_nob(&schdlr_sspnd.changing,
+ erts_atomic32_set_nob(&schdlr_sspnd.changing,
set_schdlr_sspnd_change_flags);
- erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
-
init_misc_aux_work();
-#else /* !ERTS_SMP */
- {
- ErtsSchedulerData *esdp;
- esdp = ERTS_SCHEDULER_IX(0);
- erts_scheduler_data = esdp;
-#ifdef USE_THREADS
- erts_tsd_set(sched_data_key, (void *) esdp);
-#endif
- }
- erts_no_schedulers = 1;
- erts_no_dirty_cpu_schedulers = 0;
- erts_no_dirty_io_schedulers = 0;
-#endif
-
- erts_smp_atomic32_init_nob(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
- aux_work_timeout_late_init();
-#ifndef ERTS_SMP
-#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
- erts_scheduler_data->verify_unused_temp_alloc
- = erts_alloc_get_verify_unused_temp_alloc(
- &erts_scheduler_data->verify_unused_temp_alloc_data);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
-#endif
-#endif
-
- erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
+ erts_atomic32_init_relb(&erts_halt_progress, -1);
erts_halt_code = 0;
-#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_lc_set_thread_name("scheduler 1");
-#endif
}
@@ -6157,7 +6178,6 @@ erts_schedid2runq(Uint id)
return ERTS_RUNQ_IX(ix);
}
-#ifdef USE_THREADS
ErtsSchedulerData *
erts_get_scheduler_data(void)
@@ -6165,16 +6185,14 @@ erts_get_scheduler_data(void)
return (ErtsSchedulerData *) erts_tsd_get(sched_data_key);
}
-#endif
static Process *
make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
{
erts_aint32_t state;
Process *proxy;
-#ifdef ERTS_SMP
- ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
-#endif
+ int bound;
+ ErtsRunQueue *rq = erts_get_runq_proc(proc, &bound);
state = (ERTS_PSFLG_PROXY
| ERTS_PSFLG_IN_RUNQ
@@ -6185,11 +6203,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
if (prev_proxy) {
proxy = prev_proxy;
- ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
- erts_smp_atomic32_set_nob(&proxy->state, state);
-#ifdef ERTS_SMP
- RUNQ_SET_RQ(&proc->run_queue, rq);
-#endif
+ ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_atomic32_set_nob(&proxy->state, state);
+ (void) erts_set_runq_proc(proxy, rq, &bound);
}
else {
proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
@@ -6201,11 +6217,8 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
ui32[i] = (Uint32) 0xdeadbeef;
}
#endif
- erts_smp_atomic32_init_nob(&proxy->state, state);
-#ifdef ERTS_SMP
- erts_smp_atomic_init_nob(&proxy->run_queue,
- erts_smp_atomic_read_nob(&proc->run_queue));
-#endif
+ erts_atomic32_init_nob(&proxy->state, state);
+ erts_init_runq_proc(proxy, rq, bound);
}
proxy->common.id = proc->common.id;
@@ -6218,7 +6231,6 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
-#ifdef ERTS_DIRTY_SCHEDULERS
static int
check_dirty_enqueue_in_prio_queue(Process *c_p,
@@ -6230,6 +6242,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue a free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6243,7 +6261,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
if ((*newp) & ERTS_PSFLG_ACTIVE_SYS)
return ERTS_ENQUEUE_NORMAL_QUEUE;
- dact = erts_smp_atomic32_read_mb(&c_p->dirty_state);
+ dact = erts_atomic32_read_mb(&c_p->dirty_state);
if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_CPU_PROC)) {
max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET)
@@ -6271,7 +6289,11 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
return -1*queue;
}
- *newp |= ERTS_PSFLG_IN_RUNQ;
+ /*
+ * Enqueue using process struct.
+ */
+ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+ *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
return queue;
}
@@ -6284,17 +6306,18 @@ fin_dirty_enq_s_change(Process *p,
erts_aint32_t qbit = 1 << enq_prio;
qbit <<= qmask_offset;
- if (qbit & erts_smp_atomic32_read_bor_mb(&p->dirty_state, qbit)) {
+ if (qbit & erts_atomic32_read_bor_mb(&p->dirty_state, qbit)) {
 /* Already enqueued by someone else... */
if (pstruct_reserved) {
/* We reserved process struct for enqueue; clear it... */
-#ifdef DEBUG
- erts_aint32_t old =
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
- ASSERT(old & ERTS_PSFLG_IN_RUNQ);
+ erts_aint32_t state;
+
+ state = erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
+ ASSERT(state & ERTS_PSFLG_IN_RUNQ);
+
+ if (state & ERTS_PSFLG_FREE) {
+ erts_proc_dec_refc(p);
+ }
}
return 0;
}
@@ -6302,7 +6325,6 @@ fin_dirty_enq_s_change(Process *p,
return !0;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
static ERTS_INLINE int
check_enqueue_in_prio_queue(Process *c_p,
@@ -6317,14 +6339,12 @@ check_enqueue_in_prio_queue(Process *c_p,
*prq_prio_p = aprio;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (actual & ERTS_PSFLGS_DIRTY_WORK) {
int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual,
aprio, qbit);
if (res != ERTS_ENQUEUE_NORMAL_QUEUE)
return res;
}
-#endif
max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
@@ -6367,7 +6387,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
return NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
@@ -6388,25 +6407,25 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
return NULL;
-#endif
default: {
ErtsRunQueue* runq;
+ int bound;
ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
|| enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
- runq = erts_get_runq_proc(p);
+ runq = erts_get_runq_proc(p, &bound);
-#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & state)) {
+ if (!bound) {
ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
- runq = new_runq;
- }
+ if (new_runq) {
+ if (erts_try_change_runq_proc(p, new_runq))
+ runq = new_runq;
+ else
+ runq = erts_get_runq_proc(p, NULL);
+ }
}
-#endif
ASSERT(runq);
@@ -6418,6 +6437,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
 * schedule_out_process() returns with c_rq locked.
+ *
+ * Returns a non-zero value if the caller should decrease
+ * the reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6427,13 +6449,54 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
- if (is_normal_sched)
- running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
- else
- running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ ASSERT(!(state & (ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC))
+ || (BeamIsOpCode(*p->i, op_call_nif)
+ || BeamIsOpCode(*p->i, op_apply_bif)));
a = state;
+ /* Clear active-sys if needed... */
+ while (1) {
+ n = e = a;
+ if (a & ERTS_PSFLG_ACTIVE_SYS) {
+ if (a & (ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q
+ | ERTS_PSFLG_SYS_TASKS))
+ break;
+ /* Clear active-sys */
+ n &= ~ERTS_PSFLG_ACTIVE_SYS;
+ }
+ a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
+ if (a == e) {
+ a = n;
+ break;
+ }
+ }
+
+ if (!is_normal_sched)
+ running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ else {
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+ if ((a & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ /*
+ * Delay dirty GC; will be enabled automatically
+ * again by next GC...
+ */
+
+ /*
+ * No normal execution until dirty CLA or hibernate has
+ * been handled...
+ */
+ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE)));
+
+ a = erts_atomic32_read_band_nob(&p->state,
+ ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+ a &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ }
+ }
+
while (1) {
n = e = a;
@@ -6441,12 +6504,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
enqueue = ERTS_ENQUEUE_NOT;
+ ASSERT(((a & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE))
+ != ERTS_PSFLG_EXITING)
+ || ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))
+ == ERTS_PSFLG_ACTIVE));
+
n &= ~running_flgs;
- if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ if ((!!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))
+ & !(a & ERTS_PSFLG_FREE)) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
@@ -6458,7 +6527,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
if (erts_system_profile_flags.runnable_procs) {
/* Status lock prevents out of order "runnable proc" trace msgs */
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
&& (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
@@ -6470,14 +6539,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
if (proxy)
free_proxy_proc(proxy);
- erts_smp_runq_lock(c_rq);
-
- return 0;
+ erts_runq_lock(c_rq);
+ /* Decrement refc if scheduled out from a dirty scheduler... */
+ return !is_normal_sched;
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6491,23 +6561,32 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
ASSERT(runq);
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
+
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
smp_notify_inc_runq(runq);
- erts_smp_runq_lock(c_rq);
+ erts_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if the process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
@@ -6522,8 +6601,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * The refc on the process struct (i.e. the true
+ * struct, not a proxy struct) is increased while
+ * the process is in a dirty run-queue or executing
+ * on a dirty scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -6536,12 +6624,12 @@ add2runq(int enqueue, erts_aint32_t prio,
sched_p = make_proxy_proc(pxy, proc, prio);
}
- erts_smp_runq_lock(runq);
+ erts_runq_lock(runq);
/* Enqueue the process */
enqueue_process(runq, (int) prio, sched_p);
- erts_smp_runq_unlock(runq);
+ erts_runq_unlock(runq);
smp_notify_inc_runq(runq);
}
}
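
/*
 * The reference-count discipline added here is spread over three
 * sites (this function, schedule_out_process() and the dirty enqueue
 * path). A toy model, purely illustrative and not ERTS code, of the
 * invariant they maintain together: one extra reference is held for
 * as long as the process sits on a dirty run-queue or runs on a
 * dirty scheduler.
 */
typedef struct { int refc; } ToyProc;

static void toy_dirty_enqueue(ToyProc *p)
{
    p->refc++;           /* entering the dirty domain */
}

static void toy_dirty_sched_out(ToyProc *p, int requeued_dirty)
{
    if (!requeued_dirty)
        p->refc--;       /* leaving the dirty domain */
}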
@@ -6566,7 +6654,7 @@ change_proc_schedule_state(Process *p,
unsigned int lock_status = (prof_runnable_procs
&& !(locks & ERTS_PROC_LOCK_STATUS));
- ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -6581,7 +6669,7 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
if (lock_status)
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
while (1) {
erts_aint32_t e;
@@ -6605,11 +6693,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_IN_RUNQ
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE
-#ifdef ERTS_DIRTY_SCHEDULERS
|| (n & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING
-#endif
) {
/*
* Active and seemingly need to be enqueued, but
@@ -6619,7 +6705,7 @@ change_proc_schedule_state(Process *p,
enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a);
}
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
if (enqueue == ERTS_ENQUEUE_NOT && n == a)
@@ -6632,23 +6718,23 @@ change_proc_schedule_state(Process *p,
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
- && (!(a & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)
- && (!(a & ERTS_PSFLG_ACTIVE)
- || (a & ERTS_PSFLG_SUSPENDED))))) {
+ & ((a & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ & !(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
 /* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
if (lock_status)
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- *statep = a;
+ *statep = n;
return enqueue;
}
@@ -6673,141 +6759,72 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
schedule_process(p, state, locks);
}
-static int
-schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
- erts_aint32_t *fail_state_p)
-{
- int res;
- int locked;
- ErtsProcSysTaskQs *stqs, *free_stqs;
- erts_aint32_t fail_state, state, a, n, enq_prio;
+/* Enqueues the given sys task on the process and schedules it. The task may be
+ * NULL if only scheduling is desired. */
+static ERTS_INLINE erts_aint32_t
+active_sys_enqueue(Process *p, ErtsProcSysTask *sys_task,
+ erts_aint32_t task_prio, erts_aint32_t enable_flags,
+ erts_aint32_t state, erts_aint32_t *fail_state_p)
+{
+ int runnable_procs = erts_system_profile_flags.runnable_procs;
+ erts_aint32_t n, a, enq_prio, fail_state;
+ int already_scheduled;
+ int status_locked;
int enqueue; /* < 0 -> use proxy */
- unsigned int prof_runnable_procs;
- int strict_fail_state;
+ enable_flags |= ERTS_PSFLG_ACTIVE_SYS;
fail_state = *fail_state_p;
- /*
- * If fail state something other than just exiting process,
- * ensure that the task wont be scheduled when the
- * receiver is in the failure state.
- */
- strict_fail_state = fail_state != ERTS_PSFLG_EXITING;
-
- res = 1; /* prepare for success */
- st->next = st->prev = st; /* Prep for empty prio queue */
- state = erts_smp_atomic32_read_nob(&p->state);
- prof_runnable_procs = erts_system_profile_flags.runnable_procs;
- locked = 0;
- free_stqs = NULL;
- if (state & ERTS_PSFLG_ACTIVE_SYS)
- stqs = NULL;
- else {
- alloc_qs:
- stqs = proc_sys_task_queues_alloc();
- stqs->qmask = 1 << prio;
- stqs->ncount = 0;
- stqs->q[PRIORITY_MAX] = NULL;
- stqs->q[PRIORITY_HIGH] = NULL;
- stqs->q[PRIORITY_NORMAL] = NULL;
- stqs->q[PRIORITY_LOW] = NULL;
- stqs->q[prio] = st;
- }
-
- if (!locked) {
- locked = 1;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
-
- state = erts_smp_atomic32_read_nob(&p->state);
- if (state & fail_state) {
- *fail_state_p = (state & fail_state);
- free_stqs = stqs;
- res = 0;
- goto cleanup;
- }
- }
-
- if (!p->sys_task_qs) {
- if (stqs)
- p->sys_task_qs = stqs;
- else
- goto alloc_qs;
- }
- else {
- free_stqs = stqs;
- stqs = p->sys_task_qs;
- if (!stqs->q[prio]) {
- stqs->q[prio] = st;
- stqs->qmask |= 1 << prio;
- }
- else {
- st->next = stqs->q[prio];
- st->prev = stqs->q[prio]->prev;
- st->next->prev = st;
- st->prev->next = st;
- ASSERT(stqs->qmask & (1 << prio));
- }
- }
-
- if (ERTS_PSFLGS_GET_ACT_PRIO(state) > prio) {
- erts_aint32_t n, a, e;
- /* Need to elevate actual prio */
-
- a = state;
- do {
- if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
- n = a;
- break;
- }
- n = e = a;
- n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
- n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
- a = erts_smp_atomic32_cmpxchg_nob(&p->state, n, e);
- } while (a != e);
- state = n;
- }
-
-
- a = state;
+ already_scheduled = 0;
+ status_locked = 0;
enq_prio = -1;
+ a = state;
- /* Status lock prevents out of order "runnable proc" trace msgs */
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+ ASSERT(fail_state & (ERTS_PSFLG_EXITING | ERTS_PSFLG_FREE));
+ ASSERT(!(fail_state & enable_flags));
+ ASSERT(!(state & ERTS_PSFLG_PROXY));
- if (!prof_runnable_procs && !strict_fail_state) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- locked = 0;
+ /* When runnable_procs is enabled, we need to take the status lock to
+ * prevent trace messages from being sent in the wrong order. The lock must
+ * be held over the call to add2runq.
+ *
+ * Otherwise, we only need to take it when we're enqueuing a task and can
+ * safely release it before add2runq. */
+ if (sys_task || runnable_procs) {
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ status_locked = 1;
}
- ASSERT(!(state & ERTS_PSFLG_PROXY));
-
while (1) {
erts_aint32_t e;
n = e = a;
- if (strict_fail_state && (a & fail_state)) {
- *fail_state_p = (a & fail_state);
+ if (a & fail_state) {
+ *fail_state_p = a & fail_state;
goto cleanup;
- }
-
- if (a & ERTS_PSFLG_FREE)
- goto cleanup; /* We don't want to schedule free processes... */
+ }
enqueue = ERTS_ENQUEUE_NOT;
- n |= ERTS_PSFLG_ACTIVE_SYS;
+ n |= enable_flags;
+
if (!(a & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
- if (a == e)
+ }
+
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e) {
break;
- if (a == n && enqueue == ERTS_ENQUEUE_NOT)
- goto cleanup;
+ }
+ else if (a == n && enqueue == ERTS_ENQUEUE_NOT) {
+ already_scheduled = 1;
+ break;
+ }
}
- if (prof_runnable_procs) {
-
+ if (!already_scheduled && runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
@@ -6817,24 +6834,98 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
 /* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
+ }
+
+ if (sys_task) {
+ ErtsProcSysTaskQs *stqs = p->sys_task_qs;
+
+ if (!stqs) {
+ sys_task->next = sys_task->prev = sys_task;
+
+ stqs = proc_sys_task_queues_alloc();
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- locked = 0;
+ stqs->qmask = 1 << task_prio;
+ stqs->ncount = 0;
+ stqs->q[PRIORITY_MAX] = NULL;
+ stqs->q[PRIORITY_HIGH] = NULL;
+ stqs->q[PRIORITY_NORMAL] = NULL;
+ stqs->q[PRIORITY_LOW] = NULL;
+ stqs->q[task_prio] = sys_task;
+
+ p->sys_task_qs = stqs;
+ }
+ else {
+ if (!stqs->q[task_prio]) {
+ sys_task->next = sys_task->prev = sys_task;
+
+ stqs->q[task_prio] = sys_task;
+ stqs->qmask |= 1 << task_prio;
+ }
+ else {
+ sys_task->next = stqs->q[task_prio];
+ sys_task->prev = stqs->q[task_prio]->prev;
+ sys_task->next->prev = sys_task;
+ sys_task->prev->next = sys_task;
+ ASSERT(stqs->qmask & (1 << task_prio));
+ }
+ }
}
- add2runq(enqueue, enq_prio, p, n, NULL);
+ if (status_locked && !runnable_procs) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ status_locked = 0;
+ }
+
+ if (!already_scheduled) {
+ add2runq(enqueue, enq_prio, p, n, NULL);
+ }
cleanup:
+ if (status_locked) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ }
- if (locked)
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ return n;
+}
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
+erts_aint32_t
+erts_proc_sys_schedule(Process *p, erts_aint32_t state, erts_aint32_t enable_flag)
+{
+ erts_aint32_t fail_state = ERTS_PSFLG_FREE;
- ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+ return active_sys_enqueue(p, NULL, 0, enable_flag, state, &fail_state);
+}
- return res;
+static int
+schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
+ erts_aint32_t *fail_state_p)
+{
+ erts_aint32_t fail_state, state;
+
+ /* Elevate priority if needed. */
+ state = erts_atomic32_read_nob(&p->state);
+ if (ERTS_PSFLGS_GET_ACT_PRIO(state) > prio) {
+ erts_aint32_t n, a, e;
+
+ a = state;
+ do {
+ if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+ n = a;
+ break;
+ }
+ n = e = a;
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+ a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
+ } while (a != e);
+
+ state = n;
+ }
+
+ fail_state = *fail_state_p;
+
+ return !(active_sys_enqueue(p, st, prio, ERTS_PSFLG_SYS_TASKS,
+ state, fail_state_p) & fail_state);
}
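
/*
 * Usage shape (a sketch; the wrapper is hypothetical and the
 * fail-state mask depends on the caller): schedule a system task and
 * inspect the returned flags on rejection.
 */
static int
toy_schedule_sys_task(Process *p, ErtsProcSysTask *st)
{
    erts_aint32_t fail_state = ERTS_PSFLG_EXITING; /* reject if exiting */

    if (!schedule_process_sys_task(p, PRIORITY_NORMAL, st, &fail_state)) {
        /* fail_state now holds the subset of flags that caused the
         * rejection, e.g. ERTS_PSFLG_EXITING or ERTS_PSFLG_FREE. */
        return 0;
    }
    return 1;
}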
static ERTS_INLINE int
@@ -6842,15 +6933,15 @@ suspend_process(Process *c_p, Process *p)
{
erts_aint32_t state;
int suspended = 0;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_acqb(&p->state);
if ((state & ERTS_PSFLG_SUSPENDED))
suspended = -1;
else {
if (c_p == p) {
- state = erts_smp_atomic32_read_bor_relb(&p->state,
+ state = erts_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
ASSERT(state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
@@ -6866,7 +6957,7 @@ suspend_process(Process *c_p, Process *p)
n = e = state;
n |= ERTS_PSFLG_SUSPENDED;
- state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
+ state = erts_atomic32_cmpxchg_relb(&p->state, n, e);
if (state == e) {
suspended = 1;
break;
@@ -6911,14 +7002,14 @@ resume_process(Process *p, ErtsProcLocks locks)
erts_aint32_t state, enq_prio = -1;
int enqueue;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
ASSERT(p->rcount > 0);
if (--p->rcount > 0) /* multiple suspend */
return;
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
enqueue = change_proc_schedule_state(p,
ERTS_PSFLG_SUSPENDED,
0,
@@ -6928,17 +7019,9 @@ resume_process(Process *p, ErtsProcLocks locks)
add2runq(enqueue, enq_prio, p, state, NULL);
}
-#ifdef ERTS_SMP
-
-static void
-scheduler_ix_resume_wake(Uint ix)
-{
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
-}
-static void
-scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
+static ERTS_INLINE void
+sched_resume_wake__(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_TSE_SLEEPING
@@ -6946,15 +7029,35 @@ scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi)
| ERTS_SSI_FLG_SUSPENDED);
erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
+ oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
}
xflgs = oflgs;
- } while (oflgs & ERTS_SSI_FLG_SUSPENDED);
+ } while (oflgs & (ERTS_SSI_FLG_MSB_EXEC|ERTS_SSI_FLG_SUSPENDED));
+}
+
+static void
+nrml_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
+
+static void
+dcpu_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+}
+
+static void
+dio_sched_ix_resume_wake(Uint ix)
+{
+ sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
+}
+
+
static erts_aint32_t
sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
@@ -6965,7 +7068,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -6982,7 +7085,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED))
@@ -7001,21 +7104,23 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
}
static erts_aint32_t
-sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
+sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t sleep_type)
{
erts_aint32_t oflgs;
- erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t nflgs = ((ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)
+ | sleep_type);
erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(sleep_type == ERTS_SSI_FLG_TSE_SLEEPING);
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -7033,11 +7138,19 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
init_scheduler_suspend(void)
{
- erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
- schdlr_sspnd.online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.curr_online = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- schdlr_sspnd.active = ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0);
- erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
+ erts_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ schdlr_sspnd.online.normal = 1;
+ schdlr_sspnd.curr_online.normal = 1;
+ schdlr_sspnd.active.normal = 1;
+ schdlr_sspnd.online.dirty_cpu = 0;
+ schdlr_sspnd.curr_online.dirty_cpu = 0;
+ schdlr_sspnd.active.dirty_cpu = 0;
+ schdlr_sspnd.online.dirty_io = 0;
+ schdlr_sspnd.curr_online.dirty_io = 0;
+ schdlr_sspnd.active.dirty_io = 0;
+ schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO;
+ erts_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.chngq = NULL;
schdlr_sspnd.changer = am_false;
schdlr_sspnd.nmsb.ongoing = 0;
@@ -7059,12 +7172,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7073,21 +7192,285 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
+
+static ERTS_INLINE int
+have_dirty_work(void)
+{
+ return !(ERTS_EMPTY_RUNQ(ERTS_DIRTY_CPU_RUNQ)
+ | ERTS_EMPTY_RUNQ(ERTS_DIRTY_IO_RUNQ));
+}
+
+#define ERTS_MSB_NONE_PRIO_BIT PORT_BIT
+
+static ERTS_INLINE Uint32
+msb_runq_prio_bit(Uint32 flgs)
+{
+ int pbit;
+
+ pbit = (int) (flgs & ERTS_RUNQ_FLGS_PROCS_QMASK);
+ if (flgs & PORT_BIT) {
+ /* rate ports as proc prio high */
+ pbit |= HIGH_BIT;
+ }
+ if (flgs & ERTS_RUNQ_FLG_MISC_OP) {
+ /* rate misc ops as proc prio normal */
+ pbit |= NORMAL_BIT;
+ }
+ if (flgs & LOW_BIT) {
+ /* rate low prio as normal (avoid starvation) */
+ pbit |= NORMAL_BIT;
+ }
+ if (!pbit)
+ pbit = (int) ERTS_MSB_NONE_PRIO_BIT;
+ else
+ pbit &= -pbit; /* least significant bit set... */
+ ASSERT(pbit);
+
+ /* High prio low value; low prio high value... */
+ return (Uint32) pbit;
+}
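
/*
 * The pbit &= -pbit step is the usual two's-complement trick: x & -x
 * isolates the least significant set bit, which here is the highest
 * priority with queued work, since lower bit values mean higher
 * priority. A standalone illustration, assuming the MAX/HIGH/NORMAL/
 * LOW bit values 1, 2, 4 and 8 (the layout implied by the comment
 * above):
 */
#include <assert.h>

int main(void)
{
    int pbit = 2 | 4;    /* HIGH_BIT | NORMAL_BIT queued */
    pbit &= -pbit;       /* keep least significant set bit */
    assert(pbit == 2);   /* HIGH wins: lower value, higher prio */
    return 0;
}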
+
+static ERTS_INLINE void
+msb_runq_prio_bits(Uint32 *nrmlp, Uint32 *dcpup, Uint32 *diop)
+{
+ Uint32 flgs = ERTS_RUNQ_FLGS_GET(ERTS_RUNQ_IX(0));
+ if (flgs & ERTS_RUNQ_FLG_HALTING) {
+ /*
+ * The emulator is halting; only execute port jobs
+ * on the normal scheduler. Ensure that we switch
+ * to the normal scheduler.
+ */
+ *nrmlp = HIGH_BIT;
+ *dcpup = ERTS_MSB_NONE_PRIO_BIT;
+ *diop = ERTS_MSB_NONE_PRIO_BIT;
+ }
+ else {
+ *nrmlp = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_CPU_RUNQ);
+ *dcpup = msb_runq_prio_bit(flgs);
+
+ flgs = ERTS_RUNQ_FLGS_GET(ERTS_DIRTY_IO_RUNQ);
+ *diop = msb_runq_prio_bit(flgs);
+ }
+}
+
+static int
+msb_scheduler_type_switch(ErtsSchedType sched_type,
+ ErtsSchedulerData *esdp,
+ long no)
+{
+ Uint32 nrml_prio, dcpu_prio, dio_prio;
+ ErtsSchedType exec_type;
+ ErtsRunQueue *exec_rq;
+#ifdef DEBUG
+ erts_aint32_t dbg_val;
+#endif
+
+ ASSERT(schdlr_sspnd.msb.ongoing);
+
+ /*
+ * This function determines how to switch
+ * between scheduler types when multi-scheduling
+ * is blocked.
+ *
+ * If no dirty work exists, we always select
+ * execution on the normal scheduler. If nothing
+ * executes, normal scheduler 1 should be waiting
+ * in sys_schedule(); otherwise we cannot react
+ * to I/O events.
+ *
+ * We unconditionally switch back to the normal
+ * scheduler after executing dirty work in order
+ * to make sure we check for I/O...
+ */
+
+ msb_runq_prio_bits(&nrml_prio, &dcpu_prio, &dio_prio);
+
+ exec_type = ERTS_SCHED_NORMAL;
+ if (sched_type == ERTS_SCHED_NORMAL) {
+
+ /*
+ * Check priorities of work in the
+ * different run-queues and determine
+ * run-queue with highest prio job...
+ */
+
+ if ((dcpu_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & (dio_prio == ERTS_MSB_NONE_PRIO_BIT)) {
+ /*
+ * No dirty work exists; continue on the normal
+ * scheduler...
+ */
+ return 0;
+ }
+
+ if (dcpu_prio < nrml_prio) {
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ if (dio_prio < dcpu_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ else {
+ if (dio_prio < nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+
+ /*
+ * Make sure to alternate between dirty types
+ * in between normal execution when the highest
+ * priorities are equal.
+ */
+
+ if (exec_type == ERTS_SCHED_NORMAL) {
+ if (dcpu_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else if (dio_prio == nrml_prio)
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ else {
+ /*
+ * Normal work has higher prio than
+ * dirty work; continue on normal
+ * scheduler...
+ */
+ return 0;
+ }
+ }
+
+ ASSERT(exec_type != ERTS_SCHED_NORMAL);
+ if (dio_prio == dcpu_prio) {
+ /* Alternate between dirty types... */
+ if (schdlr_sspnd.last_msb_dirty_type == ERTS_SCHED_DIRTY_IO)
+ exec_type = ERTS_SCHED_DIRTY_CPU;
+ else
+ exec_type = ERTS_SCHED_DIRTY_IO;
+ }
+ }
+
+ ASSERT(sched_type != exec_type);
+
+ if (exec_type != ERTS_SCHED_NORMAL)
+ schdlr_sspnd.last_msb_dirty_type = exec_type;
+ else {
+
+ if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT)
+ & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT)
+ | (dio_prio != ERTS_MSB_NONE_PRIO_BIT))) {
+ /*
+ * We have dirty work, but an empty
+ * normal run-queue.
+ *
+ * Since the normal run-queue is
+ * empty, the normal scheduler will
+ * go to sleep when selected for
+ * execution. We have dirty work to
+ * do, so we only want it to check
+ * I/O, and then come back here and
+ * switch to dirty execution.
+ *
+ * To prevent the scheduler from going
+ * to sleep we trick it into believing
+ * it has work to do...
+ */
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0),
+ ERTS_RUNQ_FLG_MISC_OP);
+ }
+ }
+
+ /*
+ * Suspend this scheduler and wake up scheduler
+ * number one of another type...
+ */
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_atomic32_read_bset_mb(&esdp->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_SUSPENDED);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+
+ switch (exec_type) {
+ case ERTS_SCHED_NORMAL:
+ exec_rq = ERTS_RUNQ_IX(0);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ exec_rq = ERTS_DIRTY_CPU_RUNQ;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ exec_rq = ERTS_DIRTY_IO_RUNQ;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ exec_rq = NULL;
+ break;
+ }
+
+#ifdef DEBUG
+ dbg_val =
+#else
+ (void)
+#endif
+ erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
+ (ERTS_SSI_FLG_SUSPENDED
+ | ERTS_SSI_FLG_MSB_EXEC),
+ ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_SUSPENDED);
+
+ wake_scheduler(exec_rq);
+
+ return 1; /* suspend this scheduler... */
+
+}
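
/*
 * The selection logic above boils down to a comparison of the three
 * priority bit values, with ERTS_MSB_NONE_PRIO_BIT marking an empty
 * queue and ties falling back to the alternation handled separately.
 * A condensed sketch, not a drop-in replacement:
 */
static ErtsSchedType
pick_exec_type(Uint32 nrml, Uint32 dcpu, Uint32 dio)
{
    /* Lower bit value = higher priority. */
    if (dcpu < nrml || dio < nrml)
        return (dio < dcpu) ? ERTS_SCHED_DIRTY_IO : ERTS_SCHED_DIRTY_CPU;
    return ERTS_SCHED_NORMAL; /* equal prios alternate, as above */
}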
+
+static ERTS_INLINE void
+suspend_normal_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ ErtsSchedulerSleepInfo *ssi = esdp->ssi;
+ erts_aint32_t flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ flgs = sched_set_suspended_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+}
+
+static ERTS_INLINE void
+suspend_dirty_scheduler_sleep(ErtsSchedulerData *esdp)
+{
+ suspend_normal_scheduler_sleep(esdp);
+}
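
/*
 * The wait loop above retries on EINTR because a signal may interrupt
 * the blocking wait before the scheduler is actually resumed. The same
 * idiom with plain POSIX read(2), for illustration only:
 */
#include <errno.h>
#include <unistd.h>

static ssize_t
read_retry(int fd, void *buf, size_t n)
{
    ssize_t r;
    do {
        r = read(fd, buf, n);            /* restart if interrupted */
    } while (r < 0 && errno == EINTR);
    return r;
}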
+
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
@@ -7113,62 +7496,60 @@ suspend_scheduler(ErtsSchedulerData *esdp)
* Regardless of why a scheduler is suspended, it ends up here.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
- sched_type = ERTS_SCHED_DIRTY_CPU;
- }
- else {
- online_flag = 0;
- sched_type = ERTS_SCHED_DIRTY_IO;
- }
+
+
+ sched_type = esdp->type;
+ switch (sched_type) {
+ case ERTS_SCHED_NORMAL:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
+ no = esdp->no;
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ online_flag = ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN;
+ no = esdp->dirty_no;
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ online_flag = 0;
+ no = esdp->dirty_no;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid scheduler type");
+ return;
}
- else
-#endif
- {
- online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
- no = esdp->no;
- sched_type = ERTS_SCHED_NORMAL;
+
+ if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+ ASSERT(no == 1);
+ if (!msb_scheduler_type_switch(sched_type, esdp, no))
+ return;
+ /* Suspend and let scheduler 1 of another type execute... */
}
- ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
+ dirty_active(esdp, -1);
+ erts_runq_unlock(esdp->run_queue);
+ dirty_sched_wall_time_change(esdp, 0);
}
else {
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (no != 1)
+ evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_runq_unlock(esdp->run_queue);
erts_sched_check_cpu_bind_prep_suspend(esdp);
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
-
- sched_wall_time_change(esdp, 0);
-
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type);
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) >= 1);
-
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
while (1) {
@@ -7189,28 +7570,35 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ERTS_SCHED_NORMAL) == 1) {
clr_flg = ERTS_SCHDLR_SSPND_CHNG_NMSB;
}
- else if (schdlr_sspnd.active
- == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)) {
- clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
+ else if (schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL) == 1
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_CPU) == 0
+ && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_DIRTY_IO) == 0) {
+ clr_flg = ERTS_SCHDLR_SSPND_CHNG_MSB;
}
if (clr_flg) {
ErtsProcList *plp, *end_plp;
- changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
~clr_flg);
changing &= ~clr_flg;
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7239,7 +7627,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
== schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online,
sched_type))) {
ErtsProcList *plp;
- changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
~online_flag);
changing &= ~online_flag;
if (sched_type == ERTS_SCHED_NORMAL) {
@@ -7260,44 +7648,31 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
- if (curr_online
- && (sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing)) {
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (curr_online) {
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
schdlr_sspnd_resume_procs(sched_type, &resume);
while (1) {
- ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
- erts_aint32_t flgs;
-
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
- aux_work = 0;
- }
- else {
+ if (sched_type != ERTS_SCHED_NORMAL)
+ suspend_dirty_scheduler_sleep(esdp);
+ else
+ {
+ ErtsMonotonicTime current_time, timeout_time;
+ int evacuate = no == 1 ? 0 : !ERTS_EMPTY_RUNQ(esdp->run_queue);
+
+ ASSERT(sched_type == ERTS_SCHED_NORMAL);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work|qmask) {
+ if (aux_work|evacuate) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp),
+ thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
if (aux_work)
@@ -7305,155 +7680,105 @@ suspend_scheduler(ErtsSchedulerData *esdp)
aux_work,
1);
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
- if (qmask) {
- erts_smp_runq_lock(esdp->run_queue);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
+ if (evacuate) {
+ erts_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- }
- }
-
- }
-
- if (aux_work) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ erts_runq_unlock(esdp->run_queue);
}
- erts_bump_timers(esdp->timer_wheel, current_time);
}
- }
- else {
- ErtsMonotonicTime timeout_time;
- int do_timeout;
- if (sched_type == ERTS_SCHED_NORMAL) {
- timeout_time = erts_check_next_timeout_time(esdp);
- current_time = erts_get_monotonic_time(esdp);
- do_timeout = (current_time >= timeout_time);
- }
- else {
- timeout_time = ERTS_MONOTONIC_TIME_MAX;
- current_time = 0;
- do_timeout = 0;
- }
- if (do_timeout) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- }
- else {
- if (sched_type == ERTS_SCHED_NORMAL) {
- if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
- sched_wall_time_change(esdp, 0);
- }
- erts_thr_progress_prepare_wait(esdp);
- }
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- int res;
-
- if (sched_type == ERTS_SCHED_NORMAL)
- current_time = erts_get_monotonic_time(esdp);
- else
- current_time = 0;
-
- do {
- Sint64 timeout;
- if (current_time >= timeout_time)
- break;
- if (sched_type != ERTS_SCHED_NORMAL)
- timeout = -1;
- else
- timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
- - current_time
- - 1) + 1;
- res = erts_tse_twait(ssi->event, timeout);
-
- if (sched_type == ERTS_SCHED_NORMAL)
- current_time = erts_get_monotonic_time(esdp);
- else
- current_time = 0;
-
- } while (res == EINTR);
- }
- }
- if (sched_type == ERTS_SCHED_NORMAL)
- erts_thr_progress_finalize_wait(esdp);
- }
-
- if (current_time >= timeout_time) {
- ASSERT(sched_type == ERTS_SCHED_NORMAL);
- erts_bump_timers(esdp->timer_wheel, current_time);
- }
- }
+ if (aux_work)
+ timeout_time = erts_next_timeout_time(esdp->next_tmo_ref);
+ else
+ timeout_time = erts_check_next_timeout_time(esdp);
+
+ current_time = erts_get_monotonic_time(esdp);
+
+ if (!aux_work && current_time < timeout_time) {
+ /* go to sleep... */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
+ suspend_normal_scheduler_sleep(esdp);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(NULL));
+ current_time = erts_get_monotonic_time(esdp);
+ }
+
+ if (current_time >= timeout_time) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ erts_bump_timers(esdp->timer_wheel, current_time);
+ }
+ }
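The rewritten wait above replaces the old two-branch sleep/timeout handling with a single path: pick the next timeout (a cheap read when aux work is pending, a full check otherwise), sleep only if there is no aux work and the timeout is not yet due, and bump timers once it is. A minimal standalone sketch of the same pattern, assuming plain POSIX (clock_gettime()/nanosleep() stand in for the ERTS monotonic clock and the scheduler sleep primitive; none of these names are ERTS APIs):

#include <stdint.h>
#include <time.h>

static int64_t now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void wait_step(int have_aux_work, int64_t timeout_time)
{
    int64_t current_time = now_ms();

    if (!have_aux_work && current_time < timeout_time) {
        /* go to sleep until the next known timeout... */
        int64_t left = timeout_time - current_time;
        struct timespec ts = { left / 1000, (left % 1000) * 1000000 };
        nanosleep(&ts, NULL);  /* the real scheduler waits on an event flag */
        current_time = now_ms();
    }
    if (current_time >= timeout_time) {
        /* fire everything that is due, cf. erts_bump_timers() */
    }
}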
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing)
break;
}
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
}
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online,
+ &schdlr_sspnd.active)) {
+ erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
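The change-flag handling above clears each in-progress bit only once its completion condition is observed under the schdlr_sspnd mutex: MSB when all scheduler types are back at their online counts and no block is ongoing, NMSB when the normal schedulers are. A sketch of the same clear-on-condition idiom with C11 atomics (flag names are illustrative, not the ERTS definitions):

#include <stdatomic.h>

#define CHNG_MSB  (1u << 0)
#define CHNG_NMSB (1u << 1)

static void clear_done_changes(atomic_uint *changing,
                               int msb_done, int nmsb_done)
{
    unsigned c = atomic_load_explicit(changing, memory_order_relaxed);
    if (!c)
        return;
    if ((c & CHNG_MSB) && msb_done)       /* all schedulers active again */
        atomic_fetch_and_explicit(changing, ~CHNG_MSB,
                                  memory_order_relaxed);
    if ((c & CHNG_NMSB) && nmsb_done)     /* normal schedulers active again */
        atomic_fetch_and_explicit(changing, ~CHNG_NMSB,
                                  memory_order_relaxed);
}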
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
- ASSERT((sched_type == ERTS_SCHED_NORMAL
- ? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
- : !schdlr_sspnd.msb.ongoing));
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
- ASSERT(!resume.msb.chngrs);
schdlr_sspnd_resume_procs(sched_type, &resume);
ASSERT(curr_online);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (erts_system_profile_flags.scheduler)
- profile_scheduler(make_small(esdp->no), am_active);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_sched_wall_time_change(esdp, 1);
+ else {
+ (void) erts_get_monotonic_time(esdp);
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
}
- if (sched_type == ERTS_SCHED_NORMAL)
- (void) erts_get_monotonic_time(esdp);
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
- if (sched_type == ERTS_SCHED_NORMAL) {
+ if (sched_type != ERTS_SCHED_NORMAL)
+ dirty_active(esdp, 1);
+ else {
schedule_bound_processes(esdp->run_queue, &sbp);
erts_sched_check_cpu_bind_post_suspend(esdp);
@@ -7472,7 +7797,7 @@ erts_schedulers_state(Uint *total,
{
if (active || online || dirty_cpu_online
|| dirty_cpu_active || dirty_io_active) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
if (active)
*active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
ERTS_SCHED_NORMAL);
@@ -7488,7 +7813,7 @@ erts_schedulers_state(Uint *total,
if (dirty_io_active)
*dirty_io_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
ERTS_SCHED_DIRTY_IO);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
}
if (total)
@@ -7504,7 +7829,7 @@ abort_sched_onln_chng_waitq(Process *p)
{
Eterm resume = NIL;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
#ifdef DEBUG
{
@@ -7554,10 +7879,10 @@ abort_sched_onln_chng_waitq(Process *p)
}
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7571,11 +7896,7 @@ erts_set_schedulers_online(Process *p,
erts_aint32_t changing = 0, change_flags;
int online, increase;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
int dirty_no, change_dirty, dirty_online;
-#else
- ASSERT(!dirty_only);
-#endif
if (new_no < 1)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -7584,11 +7905,9 @@ erts_set_schedulers_online(Process *p,
else if (erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (dirty_only)
resume_proc = 0;
else
-#endif
{
resume_proc = 1;
/*
@@ -7597,23 +7916,21 @@ erts_set_schedulers_online(Process *p,
* race...
*/
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
suspend_process(p, p);
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
change_flags = 0;
have_unlocked_plocks = 0;
no = (int) new_no;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!dirty_only)
-#endif
{
- changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
+ changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
enqueue_wait:
p->flags |= F_SCHDLR_ONLN_WAITQ;
@@ -7639,12 +7956,6 @@ erts_set_schedulers_online(Process *p,
*old_no = online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_NORMAL);
-#ifndef ERTS_DIRTY_SCHEDULERS
- if (no == online) {
- res = ERTS_SCHDLR_SSPND_DONE;
- goto done;
- }
-#else /* ERTS_DIRTY_SCHEDULERS */
dirty_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU);
if (dirty_only)
@@ -7696,7 +8007,6 @@ erts_set_schedulers_online(Process *p,
if (dirty_only)
increase = (dirty_no > dirty_online);
else
-#endif /* ERTS_DIRTY_SCHEDULERS */
{
change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN;
schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
@@ -7705,12 +8015,11 @@ erts_set_schedulers_online(Process *p,
increase = (no > online);
}
- erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags);
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags);
res = ERTS_SCHDLR_SSPND_DONE;
if (increase) {
int ix;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
ErtsSchedulerSleepInfo* ssi;
if (schdlr_sspnd.msb.ongoing) {
@@ -7719,14 +8028,11 @@ erts_set_schedulers_online(Process *p,
erts_sched_poke(ssi);
}
} else {
- for (ix = dirty_online; ix < dirty_no; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = dirty_online; ix < dirty_no; ix++)
+ dcpu_sched_ix_resume_wake(ix);
}
}
if (!dirty_only)
-#endif
{
if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
for (ix = online; ix < no; ix++)
@@ -7735,7 +8041,7 @@ erts_set_schedulers_online(Process *p,
else {
if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
}
change_no_used_runqs(no);
@@ -7748,25 +8054,23 @@ erts_set_schedulers_online(Process *p,
}
}
else /* if decrease */ {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
- ErtsSchedulerSleepInfo* ssi;
if (schdlr_sspnd.msb.ongoing) {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_sched_poke(ssi);
- }
- } else {
- for (ix = dirty_no; ix < dirty_online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ erts_sched_poke(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix));
+ }
+ else {
+ for (ix = dirty_no; ix < dirty_online; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ /*
+ * Newly suspended scheduler may have just been
+ * about to handle a task. Make sure someone takes
+ * care of such a task...
+ */
+ dcpu_sched_ix_wake(0);
}
}
if (!dirty_only)
-#endif
{
if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
for (ix = no; ix < online; ix++)
@@ -7775,7 +8079,7 @@ erts_set_schedulers_online(Process *p,
else {
if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
}
change_no_used_runqs(no);
@@ -7804,17 +8108,17 @@ done:
<= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_NORMAL));
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
if (have_unlocked_plocks)
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
if (resume_proc) {
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
resume_process(p, plocks|ERTS_PROC_LOCK_STATUS);
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
return res;
@@ -7825,9 +8129,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
{
int resume_proc, ix, res, have_unlocked_plocks = 0;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerSleepInfo* ssi;
-#endif
ErtsMultiSchedulingBlock *msbp;
erts_aint32_t chng_flg;
int have_blckd_flg;
@@ -7855,97 +8156,79 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
else {
resume_proc = 1;
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
suspend_process(p, p);
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
if (on) { /* ------ BLOCK ------ */
if (msbp->chngq) {
ASSERT(msbp->ongoing);
p->flags |= have_blckd_flg;
goto wait_until_msb;
}
- else if (msbp->blckrs) {
- ASSERT(msbp->ongoing);
+ else if (msbp->blckrs || (normal && erts_no_schedulers == 1)) {
+ ASSERT(!msbp->blckrs || msbp->ongoing);
+ msbp->ongoing = 1;
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
p->flags |= have_blckd_flg;
ASSERT(normal
- ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL)
- : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL)
+ : schdlr_sspnd_get_nscheds_tot(&schdlr_sspnd.active) == 1);
ASSERT(erts_proc_sched_data(p)->no == 1);
if (schdlr_sspnd.msb.ongoing)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else
res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
}
- else {
+ else {
int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_NORMAL);
ASSERT(!msbp->ongoing);
p->flags |= have_blckd_flg;
if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
}
ASSERT(!msbp->ongoing);
msbp->ongoing = 1;
- if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)
- || (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1)) {
- ASSERT(erts_proc_sched_data(p)->no == 1);
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->blckrs, plp);
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED;
- }
- else {
- erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
- chng_flg);
- change_no_used_runqs(1);
- for (ix = 1; ix < erts_no_run_queues; ix++)
- suspend_run_queue(ERTS_RUNQ_IX(ix));
- for (ix = 1; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq);
- }
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ chng_flg);
+ change_no_used_runqs(1);
+ for (ix = 1; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0);
+ for (ix = 1; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ wake_scheduler(rq);
+ }
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0);
- }
-#endif
+ if (!normal) {
+ ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC);
+ erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags,
+ ERTS_SSI_FLG_MSB_EXEC);
+ for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++)
+ dcpu_sched_ix_suspend_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_suspend_wake(ix);
+ }
- wait_until_msb:
+ wait_until_msb:
- ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing));
+ ASSERT(chng_flg & erts_atomic32_read_nob(&schdlr_sspnd.changing));
- plp = proclist_create(p);
- erts_proclist_store_last(&msbp->chngq, plp);
- resume_proc = 0;
- if (schdlr_sspnd.msb.ongoing)
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
- else
- res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
- }
+ plp = proclist_create(p);
+ erts_proclist_store_last(&msbp->chngq, plp);
+ resume_proc = 0;
+ if (schdlr_sspnd.msb.ongoing)
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
+ else
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
ASSERT(erts_proc_sched_data(p));
}
}
@@ -7955,21 +8238,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -7978,27 +8260,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
}
if (!msbp->blckrs && !msbp->chngq) {
- int online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
- ERTS_SCHED_NORMAL);
- erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing,
+ int online;
+ erts_atomic32_read_bor_nob(&schdlr_sspnd.changing,
chng_flg);
p->flags &= ~have_blckd_flg;
msbp->ongoing = 0;
- if (online == 1) {
- /* No normal schedulers to resume */
- ASSERT(schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
- ERTS_SCHED_NORMAL) == 1);
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~chng_flg);
-#endif
- }
- else if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
- if (plocks) {
+ if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) {
+ if (plocks) {
have_unlocked_plocks = 1;
- erts_smp_proc_unlock(p, plocks);
+ erts_proc_unlock(p, plocks);
}
+ online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL);
change_no_used_runqs(online);
/* Resume all online run queues */
@@ -8008,22 +8282,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!normal) {
- ASSERT(!schdlr_sspnd.msb.ongoing);
+ if (!schdlr_sspnd.msb.ongoing) {
+ /* Get rid of msb-exec flag in run-queue of scheduler 1 */
+ resume_run_queue(ERTS_RUNQ_IX(0));
online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU);
- for (ix = 0; ix < online; ix++) {
- ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
-
- for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
- ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix);
- scheduler_ssi_resume_wake(ssi);
- }
+ for (ix = 0; ix < online; ix++)
+ dcpu_sched_ix_resume_wake(ix);
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
+ dio_sched_ix_resume_wake(ix);
}
-#endif
}
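The unblock path above now walks the blocker list, and with 'all' also the change queue, through an array of list-head pointers so both lists share one removal loop; notably, the old loop peeked and removed from msbp->blckrs even when it was supposed to be processing msbp->chngq. A simplified sketch of the pointer-array traversal over plain singly linked lists (illustrative types, not ErtsProcList):

#include <stdlib.h>

struct node { int id; struct node *next; };

static void remove_id(struct node **heads[], int nheads, int id, int all)
{
    for (int ix = 0; ix < nheads && heads[ix]; ix++) {
        struct node **pp = heads[ix];
        while (*pp) {
            if ((*pp)->id == id) {
                struct node *dead = *pp;
                *pp = dead->next;          /* unlink from this list */
                free(dead);
                if (!all)
                    return;                /* first match only */
            }
            else
                pp = &(*pp)->next;
        }
    }
}

Usage mirrors the diff: heads[0] points at the blockers list head, heads[1] at the change queue head when 'all' is set, else NULL terminates the walk.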
unblock_res:
@@ -8035,17 +8303,17 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
res = ERTS_SCHDLR_SSPND_DONE;
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
if (have_unlocked_plocks)
- erts_smp_proc_lock(p, plocks);
+ erts_proc_lock(p, plocks);
if (resume_proc) {
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
resume_process(p, plocks|ERTS_PROC_LOCK_STATUS);
if (!(plocks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
return res;
@@ -8055,14 +8323,14 @@ int
erts_is_multi_scheduling_blocked(void)
{
int res;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
if (schdlr_sspnd.msb.blckrs)
res = 1;
else if (schdlr_sspnd.nmsb.blckrs)
res = -1;
else
res = 0;
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
return res;
}
@@ -8074,7 +8342,7 @@ erts_multi_scheduling_blockers(Process *p, int normal)
msbp = normal ? &schdlr_sspnd.nmsb : &schdlr_sspnd.msb;
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ erts_mtx_lock(&schdlr_sspnd.mtx);
if (!erts_proclist_is_empty(msbp->blckrs)) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
@@ -8102,7 +8370,7 @@ erts_multi_scheduling_blockers(Process *p, int normal)
}
HRelease(p, hp_end, hp);
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_mtx_unlock(&schdlr_sspnd.mtx);
return res;
}
@@ -8112,16 +8380,17 @@ sched_thread_func(void *vesdp)
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
Uint no = esdp->no;
-#ifdef ERTS_SMP
erts_tse_t *tse;
-#endif
+ erts_port_task_pre_alloc_init_thread();
erts_sched_init_time_sup(esdp);
+ if (no == 1)
+ erts_aux_work_timeout_late_init(esdp);
+
(void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
ERTS_RUNQ_FLG_EXEC);
-#ifdef ERTS_SMP
tse = erts_tse_fetch();
erts_tse_prepare_timed(tse);
ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse;
@@ -8134,8 +8403,12 @@ sched_thread_func(void *vesdp)
erts_msacc_init_thread("scheduler", no, 1);
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
- erts_alloc_register_scheduler(vesdp);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ esdp->ssi->psi = erts_create_pollset_thread(-1, NULL);
#endif
+
+ erts_alloc_register_scheduler(vesdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
@@ -8144,18 +8417,14 @@ sched_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#ifdef ERTS_SMP
#if HAVE_ERTS_MSEG
erts_mseg_late_init();
#endif
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no);
-#endif
erts_sched_init_check_cpu_bind(esdp);
erts_proc_lock_prepare_proc_lock_waiter();
-#endif
#ifdef HIPE
hipe_thread_signal_init();
@@ -8169,7 +8438,11 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
- process_main();
+ erts_alcu_sched_spec_data_init(esdp);
+ erts_ets_sched_spec_data_init(esdp);
+
+ process_main(esdp->x_reg_array, esdp->f_reg_array);
+
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Scheduler thread number %beu terminated\n",
@@ -8177,15 +8450,12 @@ sched_thread_func(void *vesdp)
return NULL;
}
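sched_thread_func() follows the usual pattern for long-lived worker threads: stash the per-thread scheduler data in thread-specific storage, run the main loop, and treat any return as fatal. A pthreads sketch of that skeleton (pthread_setspecific() standing in for erts_tsd_set(); the key is assumed created once with pthread_key_create() at start-up):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t sched_data_key;  /* created once at start-up */

static void *sched_thread(void *vesdp)
{
    /* make the scheduler data reachable from anywhere on this thread */
    pthread_setspecific(sched_data_key, vesdp);

    /* ... per-thread init, then the main loop, which never returns ... */

    /* reaching this point is a bug: schedulers must never terminate */
    fprintf(stderr, "scheduler thread terminated\n");
    abort();
    return NULL;
}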
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static void*
sched_dirty_cpu_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -8194,6 +8464,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers;
erts_msacc_init_thread("dirty_cpu_scheduler", no, 0);
@@ -8207,9 +8479,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -8231,8 +8501,7 @@ sched_dirty_io_thread_func(void *vesdp)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
- Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp);
- ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_IO_SCHEDULER;
+ Uint no = esdp->dirty_no;
ASSERT(no != 0);
ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -8241,6 +8510,8 @@ sched_dirty_io_thread_func(void *vesdp)
callbacks.wait = NULL;
callbacks.finalize_wait = NULL;
+ dirty_sched_wall_time_change(esdp, 1);
+
esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
erts_msacc_init_thread("dirty_io_scheduler", no, 0);
@@ -8254,9 +8525,7 @@ sched_dirty_io_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -8272,40 +8541,38 @@ sched_dirty_io_thread_func(void *vesdp)
no);
return NULL;
}
-#endif
-#endif
-
-static ethr_tid aux_tid;
void
erts_start_schedulers(void)
{
+ ethr_tid tid;
int res = 0;
Uint actual;
Uint wanted = erts_no_schedulers;
Uint wanted_no_schedulers = erts_no_schedulers;
char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
+ int ix;
opts.detached = 1;
opts.name = name;
-#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n");
- if (0 != ethr_thr_create(&runq_supervisor_tid,
- runq_supervisor,
- NULL,
- &opts))
- erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread\n");
+ res = ethr_thr_create(&runq_supervisor_tid,
+ runq_supervisor,
+ NULL,
+ &opts);
+ if (0 != res)
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread, "
+ "error = %d\n", res);
}
-#endif
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
@@ -8331,35 +8598,40 @@ erts_start_schedulers(void)
}
erts_no_schedulers = actual;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
{
- int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d\n", ix);
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix);
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
}
}
-#endif
-#endif
ERTS_THR_MEMORY_BARRIER;
erts_snprintf(opts.name, 16, "aux");
- res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
+ res = ethr_thr_create(&tid, aux_thread, NULL, &opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread\n");
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+
+ for (ix = 0; ix < erts_no_poll_threads; ix++) {
+ erts_snprintf(opts.name, 16, "%d_poller", ix);
+
+ res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
+ if (res != 0)
+ erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
+ }
if (actual < 1)
erts_exit(ERTS_ERROR_EXIT,
@@ -8378,436 +8650,22 @@ erts_start_schedulers(void)
}
}
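Thread start-up failures now carry the ethr error code in the message, which the old code dropped. The same create-in-a-loop-and-report shape in a pthreads sketch (poller_main() and the thread count are stand-ins):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *poller_main(void *arg) { (void) arg; return NULL; }

static void start_pollers(int n)
{
    for (int ix = 0; ix < n; ix++) {
        pthread_t tid;
        int res = pthread_create(&tid, NULL, poller_main,
                                 (void *)(long) ix);
        if (res != 0) {
            /* include the error code so failures are diagnosable */
            fprintf(stderr, "Failed to create poll thread %d, "
                    "error = %d\n", ix, res);
            exit(1);
        }
        pthread_detach(tid);
    }
}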
-#endif /* ERTS_SMP */
-
-#ifdef ERTS_SMP
-
-static void
-add_pend_suspend(Process *suspendee,
- Eterm originator_pid,
- void (*handle_func)(Process *,
- ErtsProcLocks,
- int,
- Eterm))
-{
- ErtsPendingSuspend *psp = erts_alloc(ERTS_ALC_T_PEND_SUSPEND,
- sizeof(ErtsPendingSuspend));
- psp->next = NULL;
-#ifdef DEBUG
-#if defined(ARCH_64)
- psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
-#else
- psp->end = (ErtsPendingSuspend *) 0xdeaddead;
-#endif
-#endif
- psp->pid = originator_pid;
- psp->handle_func = handle_func;
-
- if (suspendee->pending_suspenders)
- suspendee->pending_suspenders->end->next = psp;
- else
- suspendee->pending_suspenders = psp;
- suspendee->pending_suspenders->end = psp;
-}
-
-static void
-handle_pending_suspend(Process *p, ErtsProcLocks p_locks)
-{
- ErtsPendingSuspend *psp;
- int is_alive = !ERTS_PROC_IS_EXITING(p);
-
- ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
-
- /*
- * New pending suspenders might appear while we are processing
- * (since we may release the status lock on p while processing).
- */
- while (p->pending_suspenders) {
- psp = p->pending_suspenders;
- p->pending_suspenders = NULL;
- while (psp) {
- ErtsPendingSuspend *free_psp;
- (*psp->handle_func)(p, p_locks, is_alive, psp->pid);
- free_psp = psp;
- psp = psp->next;
- erts_free(ERTS_ALC_T_PEND_SUSPEND, (void *) free_psp);
- }
- }
-
-}
-
-static ERTS_INLINE void
-cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
-{
- if (is_not_nil(p->suspendee)) {
- Process *rp;
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
- p->suspendee, ERTS_PROC_LOCK_STATUS);
- if (rp) {
- erts_resume(rp, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- }
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- p->suspendee = NIL;
- }
-}
-
-static void
-handle_pend_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ASSERT(is_nil(suspender->suspendee));
- if (suspendee_alive) {
- erts_suspend(suspendee, suspendee_locks, NULL);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspendee != suspender);
- resume_process(suspender, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static Process *
-pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks, int suspend)
-{
- Process *rp;
- int unlock_c_p_status;
-
- ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
-
- ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
-
- if (c_p->common.id == pid)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
-
- if (c_p_locks & ERTS_PROC_LOCK_STATUS)
- unlock_c_p_status = 0;
- else {
- unlock_c_p_status = 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- }
-
- if (c_p->suspendee == pid) {
- /* Process previously suspended by c_p (below)... */
- ErtsProcLocks rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS;
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks);
- c_p->suspendee = NIL;
- ASSERT(c_p->flags & F_P2PNR_RESCHED);
- c_p->flags &= ~F_P2PNR_RESCHED;
- if (!suspend && rp)
- resume_process(rp, rp_locks);
- }
- else {
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, ERTS_PROC_LOCK_STATUS);
-
- if (!rp) {
- c_p->flags &= ~F_P2PNR_RESCHED;
- goto done;
- }
-
- ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
-
- /*
- * Suspend the other process in order to prevent
- * it from being selected for normal execution.
- * This will however not prevent it from being
- * selected for execution of a system task. If
- * it is selected for execution of a system task
- * we might be blocked for quite a while if the
- * try-lock below fails. That is, there is room
- * for improvement here...
- */
-
- if (!suspend_process(c_p, rp)) {
- /* Other process running */
-
- ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
- & erts_smp_atomic32_read_nob(&rp->state));
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!suspend
- && (erts_smp_atomic32_read_nob(&rp->state)
- & ERTS_PSFLG_DIRTY_RUNNING)) {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- goto done;
- }
-#endif
-
- running:
-
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
-
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(c_p, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
- }
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
- }
- else {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)) {
- /* Executing system task... */
- resume_process(rp, ERTS_PROC_LOCK_STATUS);
- goto running;
- }
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- /*
- * If we are unlucky, the process just got selected for
- * execution of a system task. In this case we may be
- * blocked here for quite a while... Execution of system
- * tasks are fortunately quite rare events. We try to
- * avoid this by checking if it is in a state executing
- * system tasks (above), but it will not prevent all
- * scenarios for a long block here...
- */
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- goto done;
- }
-
- /*
- * The previous suspend has prevented the process
- * from being selected for normal execution regardless
- * of locks held or not held on it...
- */
- ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)));
-
- if (!suspend)
- resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- }
-
- done:
-
- if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- if (unlock_c_p_status)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- return rp;
-}
-
-
-/*
- * Like erts_pid2proc() but:
- *
- * * At least ERTS_PROC_LOCK_MAIN have to be held on c_p.
- * * At least ERTS_PROC_LOCK_MAIN have to be taken on pid.
- * * It also waits for proc to be in a state != running and garbing.
- * * If ERTS_PROC_LOCK_BUSY is returned, the calling process has to
- * yield (ERTS_BIF_YIELD[0-3]()). c_p might in this case have been
- * suspended.
- */
-Process *
-erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 0);
-}
-
-/*
- * Like erts_pid2proc_not_running(), but hands over the process
- * in a suspended state (unless c_p itself is looked up).
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 1);
-}
-
-/*
- * erts_pid2proc_nropt() is normally the same as
- * erts_pid2proc_not_running(). However it is only
- * to be used when 'not running' is a pure optimization,
- * not a requirement.
- */
-
-Process *
-erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- if (erts_disable_proc_not_running_opt)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- else
- return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks);
-}
-
-static ERTS_INLINE int
-do_bif_suspend_process(Process *c_p,
- ErtsSuspendMonitor *smon,
- Process *suspendee)
-{
- ASSERT(suspendee);
- ASSERT(!ERTS_PROC_IS_EXITING(suspendee));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(suspendee));
- if (smon) {
- if (!smon->active) {
- if (!suspend_process(c_p, suspendee))
- return 0;
- }
- smon->active += smon->pending;
- ASSERT(smon->active);
- smon->pending = 0;
- return 1;
- }
- return 0;
-}
-
-static void
-handle_pend_bif_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive)
- erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->common.id);
- else {
-#ifdef DEBUG
- int res;
-#endif
- ErtsSuspendMonitor *smon;
- smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->common.id);
-#ifdef DEBUG
- res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspender != suspendee);
- resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(suspender,
- ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static void
-handle_pend_bif_async_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
-
- Process *suspender;
-
- ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_LINK);
- if (suspender) {
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive)
- erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->common.id);
- else {
-#ifdef DEBUG
- int res;
-#endif
- ErtsSuspendMonitor *smon;
- smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->common.id);
-#ifdef DEBUG
- res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- }
- erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
- }
-}
-
-#else
-
-/*
- * Non-smp version of erts_pid2proc_suspend().
- */
-Process *
-erts_pid2proc_suspend(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- Process *rp = erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- if (rp)
- erts_suspend(rp, pid_locks, NULL);
- return rp;
-}
-
-#endif /* ERTS_SMP */
-
-/*
- * The erlang:suspend_process/2 BIF
- */
-
BIF_RETTYPE
-suspend_process_2(BIF_ALIST_2)
+erts_internal_suspend_process_2(BIF_ALIST_2)
{
Eterm res;
- Process* suspendee = NULL;
- ErtsSuspendMonitor *smon;
- ErtsProcLocks xlocks = (ErtsProcLocks) 0;
-
- /* Options and default values: */
- int asynchronous = 0;
+ Eterm reply_tag = THE_NON_VALUE;
+ Eterm reply_res = THE_NON_VALUE;
+ int suspend;
+ int sync = 0;
+ int async = 0;
int unless_suspending = 0;
-
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsMonitorData *mdp;
if (BIF_P->common.id == BIF_ARG_1)
- goto badarg; /* We are not allowed to suspend ourselves */
+ BIF_RET(am_badarg); /* We are not allowed to suspend ourselves */
if (is_not_nil(BIF_ARG_2)) {
/* Parse option list */
@@ -8821,213 +8679,128 @@ suspend_process_2(BIF_ALIST_2)
unless_suspending = 1;
break;
case am_asynchronous:
- asynchronous = 1;
+ async = 1;
break;
- default:
- goto badarg;
+ default: {
+ if (is_tuple_arity(arg, 2)) {
+ Eterm *tp = tuple_val(arg);
+ if (tp[1] == am_asynchronous) {
+ async = 1;
+ reply_tag = tp[2];
+ break;
+ }
+ }
+ BIF_RET(am_badarg);
}
+ }
arg = CDR(lp);
- }
+ }
if (is_not_nil(arg))
- goto badarg;
+ BIF_RET(am_badarg);
}
- xlocks = ERTS_PROC_LOCK_LINK | (asynchronous
- ? (ErtsProcLocks) 0
- : ERTS_PROC_LOCK_STATUS);
-
- erts_smp_proc_lock(BIF_P, xlocks);
+ if (!unless_suspending) {
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(BIF_P),
+ &suspend,
+ ERTS_MON_TYPE_SUSPEND,
+ BIF_P->common.id,
+ BIF_ARG_1);
+ ASSERT(mon->other.item == BIF_ARG_1);
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|xlocks,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee)
- goto no_suspendee;
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
- smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors,
- BIF_ARG_1);
-#ifndef ERTS_SMP /* no ERTS_SMP */
-
- /* This is really a piece of cake without SMP support... */
- if (!smon->active) {
- erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED);
- suspend_process(BIF_P, suspendee);
- smon->active++;
- res = am_true;
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(suspend || (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 1);
+ sync = !async & !suspend & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = !!suspend; /* ensure 0|1 */
+ res = am_true;
}
- else if (unless_suspending)
- res = am_false;
- else if (smon->active == INT_MAX)
- goto system_limit;
else {
- smon->active++;
- res = am_true;
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
+ if (mon) {
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_read_nob(&msp->state);
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 0);
+ mdp = NULL;
+ sync = !async & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = 0;
+ res = am_false;
+ }
+ else {
+ mdp = erts_monitor_create(ERTS_MON_TYPE_SUSPEND, NIL,
+ BIF_P->common.id,
+ BIF_ARG_1, NIL);
+ mon = &mdp->origin;
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE));
+ suspend = !0;
+ res = am_true;
+ }
}
-#else /* ERTS_SMP */
-
- /* ... but a little trickier with SMP support ... */
-
- if (asynchronous) {
- /* --- Asynchronous suspend begin ---------------------------------- */
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK
- & erts_proc_lc_my_proc_locks(BIF_P));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- if (unless_suspending)
- res = am_false;
- else if (smon->active == INT_MAX)
- goto system_limit;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
-	    /* We haven't got any active suspends on the suspendee */
- if (smon->pending && unless_suspending)
- res = am_false;
- else {
- if (smon->pending == INT_MAX)
- goto system_limit;
-
- smon->pending++;
-
- if (!do_bif_suspend_process(BIF_P, smon, suspendee))
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_async_suspend);
-
- res = am_true;
- }
- /* done */
- }
- /* --- Asynchronous suspend end ------------------------------------ */
- }
- else /* if (!asynchronous) */ {
- /* --- Synchronous suspend begin ----------------------------------- */
-
- ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)
- & erts_proc_lc_my_proc_locks(BIF_P))
- == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (BIF_P->suspendee == BIF_ARG_1) {
- /* We are back after a yield and the suspendee
-	       has been suspended on our behalf. */
- ASSERT(smon->active >= 1);
- BIF_P->suspendee = NIL;
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else if (smon->active) {
- if (unless_suspending)
- res = am_false;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
- /* We haven't got any active suspends on the suspendee */
-
- /*
- * If we have pending suspenders and suspend ourselves waiting
- * to suspend another process, or suspend another process
- * we might deadlock. In this case we have to yield,
- * be suspended by someone else, and then do it all over again.
- */
- if (BIF_P->pending_suspenders)
- goto yield;
-
- if (!unless_suspending && smon->pending == INT_MAX)
- goto system_limit;
- if (!unless_suspending || smon->pending == 0)
- smon->pending++;
-
- if (do_bif_suspend_process(BIF_P, smon, suspendee)) {
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else {
- /* Mark suspendee pending for suspend by BIF_P */
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_sync_suspend);
-
- ASSERT(is_nil(BIF_P->suspendee));
-
- /*
- * Suspend BIF_P; when suspendee is suspended, BIF_P
- * will be resumed and this BIF will be called again.
- * This time with BIF_P->suspendee == BIF_ARG_1 (see
- * above).
- */
- suspend_process(BIF_P, BIF_P);
- goto yield;
- }
- }
- /* --- Synchronous suspend end ------------------------------------- */
+ if (suspend) {
+ erts_aint32_t state;
+ Process *rp;
+ int send_sig = 0;
+
+ /* fail state... */
+ state = (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+
+ rp = erts_try_lock_sig_free_proc(BIF_ARG_1,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ &state);
+ if (!rp)
+ goto noproc;
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ send_sig = !0;
+ else {
+ send_sig = !suspend_process(BIF_P, rp);
+ if (!send_sig) {
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(rp), &mdp->target);
+ erts_atomic_read_bor_relb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+ if (send_sig) {
+ if (erts_proc_sig_send_monitor(&mdp->target, BIF_ARG_1))
+ sync = !async;
+ else {
+ noproc:
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
+ erts_monitor_release_both(mdp);
+ if (!async)
+ res = am_badarg;
+ }
+ }
}
-#endif /* ERTS_SMP */
-#ifdef DEBUG
- {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state);
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || (asynchronous && smon->pending));
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || !smon->active);
+ if (sync) {
+ ASSERT(is_non_value(reply_tag));
+ reply_res = res;
+ reply_tag = res = erts_make_ref(BIF_P);
+ ERTS_RECV_MARK_SAVE(BIF_P);
+ ERTS_RECV_MARK_SET(BIF_P);
}
-#endif
-
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(BIF_P, xlocks);
- BIF_RET(res);
- system_limit:
- ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
- goto do_return;
-
- no_suspendee:
-#ifdef ERTS_SMP
- BIF_P->suspendee = NIL;
-#endif
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
-
- badarg:
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
-#ifdef ERTS_SMP
- goto do_return;
-
- yield:
- ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
-#endif
-
- do_return:
- if (suspendee)
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- if (xlocks)
- erts_smp_proc_unlock(BIF_P, xlocks);
- return res;
+ if (is_value(reply_tag))
+ erts_proc_sig_send_sync_suspend(BIF_P, BIF_ARG_1, reply_tag, reply_res);
+ BIF_RET(res);
}
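The new suspend BIF above replaces the old suspend-monitor bookkeeping (separate active/pending counts under proc locks) with a suspend monitor whose state is one atomic word: a suspend counter in the low bits plus an ACTIVE flag set once the target is actually suspended. A hedged sketch of that packed-state idiom with C11 atomics (bit layout and names are illustrative, not the ERTS_MSUSPEND_STATE_* definitions):

#include <stdatomic.h>

#define MSUSPEND_ACTIVE        (1ul << 31)
#define MSUSPEND_COUNTER_MASK  (MSUSPEND_ACTIVE - 1)

typedef struct { atomic_ulong state; } monitor_suspend;

static int suspend_once(monitor_suspend *msp)
{
    /* nonzero if this call took the count from 0 to 1, i.e. the
     * caller is the one that must actually suspend the target */
    unsigned long s = atomic_fetch_add_explicit(&msp->state, 1,
                                                memory_order_release) + 1;
    return (s & MSUSPEND_COUNTER_MASK) == 1;
}

static int resume_once(monitor_suspend *msp)
{
    /* nonzero when the last suspend is gone and the monitor can be
     * deleted and a demonitor signal sent */
    unsigned long s = atomic_fetch_sub_explicit(&msp->state, 1,
                                                memory_order_release) - 1;
    return (s & MSUSPEND_COUNTER_MASK) == 0;
}

static void mark_active(monitor_suspend *msp)
{
    /* the target is now actually suspended */
    atomic_fetch_or_explicit(&msp->state, MSUSPEND_ACTIVE,
                             memory_order_release);
}

resume_process_1() below is the consumer side of the same word: it decrements the counter and, on reaching zero, deletes the monitor and sends the demonitor signal.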
-
/*
* The erlang:resume_process/1 BIF
*/
@@ -9035,80 +8808,33 @@ suspend_process_2(BIF_ALIST_2)
BIF_RETTYPE
resume_process_1(BIF_ALIST_1)
{
- ErtsSuspendMonitor *smon;
- Process *suspendee;
- int is_active;
+ ErtsMonitor *mon;
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1);
-
- if (!smon) {
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
+ if (!mon) {
/* No previous suspend or dead suspendee */
- goto error;
- }
- else if (smon->pending) {
- smon->pending--;
- ASSERT(smon->pending >= 0);
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
- is_active = smon->active;
- }
- else if (smon->active) {
- smon->active--;
- ASSERT(smon->pending >= 0);
- is_active = 1;
- }
- else {
- /* No previous suspend or dead suspendee */
- goto error;
+ BIF_ERROR(BIF_P, BADARG);
}
- if (smon->active || smon->pending || !is_active) {
- /* Leave the suspendee as it is; just verify that it is still alive */
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- BIF_ARG_1,
- 0);
- if (!suspendee)
- goto no_suspendee;
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
- }
- else {
- /* Resume */
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee)
- goto no_suspendee;
+ mstate = erts_atomic_dec_read_relb(&msp->state);
- ASSERT(ERTS_PSFLG_SUSPENDED
- & erts_smp_atomic32_read_nob(&suspendee->state));
- ASSERT(BIF_P != suspendee);
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) >= 0);
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ if ((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) == 0) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), mon);
+ erts_proc_sig_send_demonitor(mon);
}
- if (!smon->active && !smon->pending)
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
-
BIF_RET(am_true);
-
- no_suspendee:
- /* cleanup */
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
-
- error:
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE
@@ -9116,53 +8842,78 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
{
if (is_not_internal_pid(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
-#ifdef ERTS_DIRTY_SCHEDULERS
else {
Process *rp = erts_proc_lookup(BIF_ARG_1);
if (rp) {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
if (state & (ERTS_PSFLG_DIRTY_RUNNING
|ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
BIF_RET(am_true);
}
}
}
-#endif
BIF_RET(am_false);
}
+static ERTS_INLINE void
+run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int incl_active_sched, int locked)
+{
+ Sint rq_len;
+
+ if (locked)
+ rq_len = (Sint) erts_atomic32_read_dirty(&rq->len);
+ else
+ rq_len = (Sint) erts_atomic32_read_nob(&rq->len);
+ ASSERT(rq_len >= 0);
+
+ if (incl_active_sched) {
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ erts_aint32_t dcnt;
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) {
+ dcnt = erts_atomic32_read_nob(&dirty_count.cpu.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers);
+ }
+ else {
+ ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq));
+ dcnt = erts_atomic32_read_nob(&dirty_count.io.active);
+ ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers);
+ }
+ rq_len += (Sint) dcnt;
+ }
+ else
+ {
+ if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)
+ rq_len++;
+ }
+ }
+ if (qlen)
+ qlen[(*ip)++] = rq_len;
+ *tot_len += (Uint) rq_len;
+}
Uint
-erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
+erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched,
+ int incl_dirty_io)
{
- int i = 0;
+ int i = 0, j = 0;
Uint len = 0;
- if (atomic_queues_read)
- ERTS_ATOMIC_FOREACH_RUNQ(rq,
- {
- Sint rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len);
- ASSERT(rq_len >= 0);
- if (incl_active_sched
- && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
- rq_len++;
- }
- if (qlen)
- qlen[i++] = rq_len;
- len += (Uint) rq_len;
- }
- );
+ int no_rqs = erts_no_run_queues;
+
+ if (incl_dirty_io)
+ no_rqs += ERTS_NUM_DIRTY_RUNQS;
+ else
+ no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS;
+
+ if (atomic_queues_read) {
+ ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs,
+ run_queues_len_aux(rq, &len, qlen, &j,
+ incl_active_sched, 1),
+ /* Nothing... */);
+ }
else {
- for (i = 0; i < erts_no_run_queues; i++) {
+ for (i = 0; i < no_rqs; i++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
- Sint rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len);
- ASSERT(rq_len >= 0);
- if (incl_active_sched
- && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
- rq_len++;
- }
- if (qlen)
- qlen[i] = rq_len;
- len += (Uint) rq_len;
+ run_queues_len_aux(rq, &len, qlen, &j, incl_active_sched, 0);
}
}
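erts_run_queues_len() above folds the per-queue work into run_queues_len_aux(): read each queue's length with an atomic load, optionally count an executing scheduler (or the active dirty schedulers) as extra work, and accumulate. The core of that aggregation in a standalone C11 sketch (struct runq and the flag are stand-ins for the ERTS run queue):

#include <stdatomic.h>
#include <stddef.h>

struct runq { atomic_int len; atomic_uint flags; };
#define RUNQ_FLG_EXEC (1u << 0)

static unsigned total_len(struct runq *rqs, size_t n,
                          unsigned *per_q, int incl_active)
{
    unsigned tot = 0;
    for (size_t i = 0; i < n; i++) {
        int l = atomic_load_explicit(&rqs[i].len, memory_order_relaxed);
        if (incl_active &&
            (atomic_load_explicit(&rqs[i].flags,
                                  memory_order_relaxed) & RUNQ_FLG_EXEC))
            l++;                       /* count the job being executed */
        if (per_q)
            per_q[i] = (unsigned) l;   /* optional per-queue breakdown */
        tot += (unsigned) l;
    }
    return tot;
}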
@@ -9205,27 +8956,25 @@ erts_process_status(Process *rp, Eterm rpid)
Process *p = rp ? rp : erts_proc_lookup_raw(rpid);
if (p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
res = erts_process_state2status(state);
}
-#ifdef ERTS_SMP
else {
int i;
ErtsSchedulerData *esdp;
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
- erts_smp_runq_lock(esdp->run_queue);
+ erts_runq_lock(esdp->run_queue);
if (esdp->free_process
&& esdp->free_process->common.id == rpid) {
res = am_free;
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_runq_unlock(esdp->run_queue);
break;
}
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_runq_unlock(esdp->run_queue);
}
}
-#endif
return res;
}
@@ -9241,9 +8990,9 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
int suspend;
ASSERT(c_p == erts_get_current_process());
- ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (busy_port)
suspend = erts_save_suspend_process_on_port(busy_port, c_p);
@@ -9259,7 +9008,7 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
}
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
if (suspend && busy_port && erts_system_monitor_flags.busy_port)
monitor_generic(c_p, am_busy_port, busy_port->common.id);
@@ -9268,12 +9017,12 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
void
erts_resume(Process* process, ErtsProcLocks process_locks)
{
- ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
+ ERTS_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(process, ERTS_PROC_LOCK_STATUS);
resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
int
@@ -9293,7 +9042,7 @@ erts_resume_processes(ErtsProcList *list)
resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
}
fplp = plp;
plp = plp->next;
@@ -9303,9 +9052,8 @@ erts_resume_processes(ErtsProcList *list)
}
Eterm
-erts_get_process_priority(Process *p)
+erts_get_process_priority(erts_aint32_t state)
{
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
case PRIORITY_MAX: return am_max;
case PRIORITY_HIGH: return am_high;
@@ -9328,7 +9076,7 @@ erts_set_process_priority(Process *p, Eterm value)
default: return THE_NON_VALUE; break;
}
- a = erts_smp_atomic32_read_nob(&p->state);
+ a = erts_atomic32_read_nob(&p->state);
if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a))
oprio = nprio;
else {
@@ -9336,7 +9084,7 @@ erts_set_process_priority(Process *p, Eterm value)
erts_aint32_t e, n, aprio;
if (a & ERTS_PSFLG_ACTIVE_SYS) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
slocked = 1;
}
@@ -9350,12 +9098,15 @@ erts_set_process_priority(Process *p, Eterm value)
int max_qbit;
if (!slocked) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
slocked = 1;
}
max_qbit = 0;
- if (a & ERTS_PSFLG_ACTIVE_SYS)
+ ASSERT((a & ERTS_PSFLG_SYS_TASKS)
+ ? !!p->sys_task_qs
+ : !p->sys_task_qs);
+ if (a & ERTS_PSFLG_SYS_TASKS)
max_qbit |= p->sys_task_qs->qmask;
if (a & ERTS_PSFLG_DELAYED_SYS) {
ErtsProcSysTaskQs *qs;
@@ -9378,8 +9129,8 @@ erts_set_process_priority(Process *p, Eterm value)
aprio = PRIORITY_LOW;
break;
default:
- ERTS_INTERNAL_ERROR("Invalid qmask");
- aprio = -1;
+ aprio = nprio;
+ break;
}
if (aprio > nprio) /* low value -> high prio */
@@ -9391,11 +9142,11 @@ erts_set_process_priority(Process *p, Eterm value)
n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET)
| (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
} while (a != e);
if (slocked)
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
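The priority change above is committed with the usual CAS retry loop: recompute the packed user/active priority bits from the last observed state and swap them in until the exchange succeeds. A sketch of that loop with C11 atomics (offsets and masks are illustrative, not the ERTS_PSFLGS_* layout):

#include <stdatomic.h>

#define USR_PRIO_OFFSET 0
#define ACT_PRIO_OFFSET 2
#define PRIO_MASK       3u

static void set_prio(atomic_uint *state, unsigned nprio, unsigned aprio)
{
    unsigned e = atomic_load_explicit(state, memory_order_relaxed);
    unsigned n;
    do {
        n = e & ~((PRIO_MASK << USR_PRIO_OFFSET)
                  | (PRIO_MASK << ACT_PRIO_OFFSET));
        n |= (nprio << USR_PRIO_OFFSET) | (aprio << ACT_PRIO_OFFSET);
        /* on failure, e is reloaded with the current state and we retry */
    } while (!atomic_compare_exchange_weak_explicit(
                 state, &e, n,
                 memory_order_acq_rel, memory_order_relaxed));
}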
@@ -9429,6 +9180,20 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
+static void
+unlock_lock_rq(int pre_free, void *vrq)
+{
+ ErtsRunQueue *rq = vrq;
+ if (pre_free)
+ erts_runq_unlock(rq);
+ else
+ erts_runq_lock(rq);
+}
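unlock_lock_rq() above exists because dropping the last reference may free the process, and that must not happen while holding the run-queue lock; the callback lets erts_proc_dec_refc_free_func() release the lock just around the free and re-take it afterwards. The same shape as a pthreads sketch:

#include <pthread.h>

static void unlock_lock_cb(int pre_free, void *vlock)
{
    pthread_mutex_t *lock = vlock;
    if (pre_free)
        pthread_mutex_unlock(lock);   /* don't free while holding it */
    else
        pthread_mutex_lock(lock);     /* restore the caller's lock state */
}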
+
+
+static void trace_schedule_in(Process *p, erts_aint32_t state);
+static void trace_schedule_out(Process *p, erts_aint32_t state);
+
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9453,7 +9218,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ErtsRunQueue *rq;
int context_reds;
int fcalls;
- int input_reductions;
int actual_reds;
int reds;
Uint32 flags;
@@ -9473,21 +9237,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (ERTS_USE_MODIFIED_TIMING()) {
context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS;
- input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS;
}
else {
context_reds = CONTEXT_REDS;
- input_reductions = INPUT_REDUCTIONS;
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
+ ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())
|| !erts_thr_progress_is_blocking());
/*
* Clean up after the process being scheduled out.
*/
if (!p) { /* NULL in the very first schedule() call */
-#ifdef ERTS_DIRTY_SCHEDULERS
is_normal_sched = !esdp;
if (is_normal_sched) {
esdp = erts_get_scheduler_data();
@@ -9496,168 +9257,132 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-#else
- esdp = erts_get_scheduler_data();
- is_normal_sched = 1;
-#endif
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
- fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
} else {
-#ifdef ERTS_SMP
-#ifdef ERTS_DIRTY_SCHEDULERS
is_normal_sched = !esdp;
if (is_normal_sched) {
- esdp = p->scheduler_data;
+ esdp = p->scheduler_data;
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ if (esdp->pending_signal.sig) {
+ ASSERT(esdp->pending_signal.dbg_from == p);
+ erts_proc_sig_send_pending(esdp);
+ }
}
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-#else
- esdp = p->scheduler_data;
- is_normal_sched = 1;
-#endif
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
-#else
- esdp = erts_scheduler_data;
- ASSERT(esdp->current_process == p);
- is_normal_sched = 1;
-#endif
-
- sched_out_proc:
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
reds = actual_reds = calls - esdp->virtual_reds;
+ internal_sched_out_proc:
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ASSERT(p->scheduler_data || ERTS_SCHEDULER_IS_DIRTY(esdp));
+
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
p->reds += actual_reds;
- state = erts_smp_atomic32_read_nob(&p->state);
-
- if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ERTS_PROC_LOCK_MAIN,
- ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
- }
- else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
- ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
- }
- }
+ state = erts_atomic32_read_nob(&p->state);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ if (IS_TRACED(p))
+ trace_schedule_out(p, state);
+
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
-#ifdef ERTS_SMP
if (p->trace_msg_q) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
erts_schedule_flush_trace_messages(p, 1);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
}
-#endif
/* have to re-read state after taking lock */
- state = erts_smp_atomic32_read_nob(&p->state);
-
-#ifdef ERTS_SMP
- if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
- erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_TRACE
- | ERTS_PROC_LOCK_STATUS));
- if (p->pending_suspenders)
- handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_TRACE
- | ERTS_PROC_LOCK_STATUS));
-#endif
+ state = erts_atomic32_read_nob(&p->state);
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
-#ifdef ERTS_SMP
- p->scheduler_data = NULL;
-#endif
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ esdp->current_process = NULL;
+ if (is_normal_sched)
+ p->scheduler_data = NULL;
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ erts_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
-#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
-#endif
- }
- }
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
+
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
+
+ if (dec_refc)
+ erts_proc_dec_refc_free_func(p,
+ unlock_lock_rq,
+ (void *) rq);
+ }
-#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
-#endif
ASSERT(!esdp->current_process);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- if (is_normal_sched) {
- if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
- (void) erts_get_monotonic_time(esdp);
-
- if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- erts_smp_runq_unlock(rq);
- erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
- erts_smp_runq_lock(rq);
- }
- }
}
- ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
check_activities_to_run: {
erts_aint32_t psflg_running, psflg_running_sys;
-#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
if (is_normal_sched) {
+
+ if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
+ (void) erts_get_monotonic_time(esdp);
+
+ if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ erts_runq_unlock(rq);
+ erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
+ erts_runq_lock(rq);
+ }
+
if (rq->check_balance_reds <= 0)
check_balance(rq);
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
mps = erts_get_migration_paths_managed();
mp = &mps->mpath[rq->ix];
@@ -9666,15 +9391,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
immigrate(rq, mp);
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY));
if (!is_normal_sched) {
- if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED) {
+ if (erts_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
suspend_scheduler(esdp);
}
}
@@ -9684,8 +9409,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ASSERT(is_normal_sched);
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
+ | ERTS_RUNQ_FLG_SUSPENDED
+ | ERTS_RUNQ_FLG_MSB_EXEC)) {
+ if (flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_MSB_EXEC)) {
(void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
suspend_scheduler(esdp);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
@@ -9698,42 +9425,32 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- leader_update = erts_thr_progress_update(esdp);
+ leader_update = erts_thr_progress_update(erts_thr_prgr_data(esdp));
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
if (leader_update)
- erts_thr_progress_leader_update(esdp);
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-#else /* ERTS_SMP */
- {
- erts_aint32_t aux_work;
- aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work)
- handle_aux_work(&esdp->aux_work_data, aux_work, 0);
- }
-#endif /* ERTS_SMP */
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (!is_normal_sched && rq->halt_in_progress) {
+ if (!is_normal_sched & !!(flags & ERTS_RUNQ_FLG_HALTING)) {
/* Wait for emulator to terminate... */
while (1)
erts_milli_sleep(1000*1000);
}
- else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if (!runq_got_work_to_execute_flags(flags)) {
/* Prepare for scheduler wait */
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
@@ -9744,107 +9461,69 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (is_normal_sched && try_steal_task(rq))
- goto continue_check_activities_to_run;
-
- empty_runq(rq);
-
- /*
- * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
- * after trying to steal a task.
- */
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- non_empty_runq(rq);
- flags |= ERTS_RUNQ_FLG_NONEMPTY;
- goto continue_check_activities_to_run_known_flags;
+ ASSERT(!runq_got_work_to_execute(rq));
+ if (!is_normal_sched) {
+ /* Dirty scheduler */
+ if (erts_atomic32_read_acqb(&esdp->ssi->flags)
+ & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) {
+ /* Go suspend... */
+ goto continue_check_activities_to_run_known_flags;
+ }
+ }
+ else {
+ /* Normal scheduler */
+ if (try_steal_task(rq))
+ goto continue_check_activities_to_run;
+ /*
+ * Check for suspend has to be done after trying
+ * to steal a task...
+ */
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if ((flags & ERTS_RUNQ_FLG_SUSPENDED)
+ /* If multi scheduling block and we have
+ * dirty work, suspend and let dirty
+ * scheduler handle work... */
+ || ((((flags & (ERTS_RUNQ_FLG_HALTING
+ | ERTS_RUNQ_FLG_MSB_EXEC))
+ == ERTS_RUNQ_FLG_MSB_EXEC))
+ && have_dirty_work())
+ ) {
+ non_empty_runq(rq);
+ flags |= ERTS_RUNQ_FLG_NONEMPTY;
+ /*
+ * Go suspend...
+ */
+ goto continue_check_activities_to_run_known_flags;
+ }
}
+ empty_runq(rq);
}
-#endif
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC);
scheduler_wait(&fcalls, esdp, rq);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
flags |= ERTS_RUNQ_FLG_EXEC;
ERTS_MSACC_UPDATE_CACHE();
-#ifdef ERTS_SMP
non_empty_runq(rq);
-#endif
-
- goto check_activities_to_run;
- }
- else if (is_normal_sched
- && (fcalls > input_reductions
- && prepare_for_sys_schedule(!0))) {
- ErtsMonotonicTime current_time;
- /*
- * Schedule system-level activities.
- */
-
- ERTS_MSACC_PUSH_STATE_CACHED_M();
-
- erts_smp_atomic32_set_relb(&function_calls, 0);
- fcalls = 0;
-
-#if 0 /* Not needed since we wont wait in sys schedule */
- erts_sys_schedule_interrupt(0);
-#endif
- erts_smp_runq_unlock(rq);
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
- LTTNG2(scheduler_poll, esdp->no, 1);
-
- erl_sys_schedule(1);
- ERTS_MSACC_POP_STATE_M();
-
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
- erts_bump_timers(esdp->timer_wheel, current_time);
-
-#ifdef ERTS_SMP
- erts_smp_runq_lock(rq);
- clear_sys_scheduling();
- goto continue_check_activities_to_run;
-#else
goto check_activities_to_run;
-#endif
}
- if (rq->misc.start)
+ if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
-#ifdef ERTS_SMP
- wakeup_other.check(rq, flags);
-#endif
+ runq_get_wakeup_other_params(rq)->check(rq, flags);
/*
* Find a new port to run.
*/
- if (RUNQ_READ_LEN(&rq->ports.info.len)) {
- int have_outstanding_io;
- have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if ((!erts_eager_check_io
- && have_outstanding_io
- && fcalls > 2*input_reductions)
- || rq->halt_in_progress) {
- /*
- * If we have performed more than 2*INPUT_REDUCTIONS since
- * last call to erl_sys_schedule() and we still haven't
- * handled all I/O tasks we stop running processes and
- * focus completely on ports.
- *
- * One could argue that this is a strange behavior. The
- * reason for doing it this way is that it is similar
- * to the behavior before port tasks were introduced.
- * We don't want to change the behavior too much, at
- * least not at the time of writing. This behavior
- * might change in the future.
- *
- * /rickard
- */
- goto check_activities_to_run;
- }
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+
+ if (flags & PORT_BIT) {
+ erts_port_task_execute(rq, &esdp->current_port);
+ if (flags & ERTS_RUNQ_FLG_HALTING)
+ goto check_activities_to_run;
}
/*
@@ -9874,7 +9553,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
case 0: /* No process at all */
default:
ASSERT(qmask == 0);
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
goto check_activities_to_run;
}
@@ -9900,20 +9579,22 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
goto pick_next_process;
}
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!is_normal_sched)
clear_proc_dirty_queue_bit(p, rq, qbit);
-#endif
while (1) {
erts_aint32_t exp, new;
@@ -9931,22 +9612,25 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_FREE)))
-#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_EXITING))
== ERTS_PSFLG_EXITING)
& (!!is_normal_sched))
-#endif
)
& ((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_EXITING
| ERTS_PSFLG_FREE
- | ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- != ERTS_PSFLG_SUSPENDED));
+ != ERTS_PSFLG_SUSPENDED)
+ & (!(state & ERTS_PSFLG_EXITING)
+ | (!!is_normal_sched))
+ );
+
if (run_process) {
if (state & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
@@ -9954,7 +9638,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
else
new |= psflg_running;
}
- state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
+ state = erts_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
if (!run_process) {
if (proxy_p) {
@@ -9963,8 +9647,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
else if (state & ERTS_PSFLG_FREE) {
/* free and not queued by proxy */
+ ASSERT(state & ERTS_PSFLG_IN_RUNQ);
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
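The run_process expression above deliberately combines its conditions with bitwise '&'/'|' over operands normalized to 0/1 with '!!', rather than with '&&'/'||': every term is evaluated unconditionally, so the compiler emits no short-circuit branches on this hot path. The same idiom reduced to a standalone sketch (flag values are illustrative):

enum { RUNNING = 1u << 0, EXITING = 1u << 1 };

static int eligible(unsigned state, int is_normal_sched)
{
    int not_running = !(state & RUNNING);        /* 0 or 1 */
    int exiting_ok  = !(state & EXITING)
                    | !!is_normal_sched;         /* 0 or 1, no branch */
    return not_running & exiting_ok;
}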
@@ -9979,22 +9666,21 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
calls = 0;
reds = context_reds;
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_EMULATOR);
-#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
if (erts_sched_stat.enabled) {
int prio;
@@ -10005,35 +9691,37 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[prio].total_executed++;
erts_sched_stat.prio[prio].executed++;
if (migrated) {
erts_sched_stat.prio[prio].total_migrated++;
erts_sched_stat.prio[prio].migrated++;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
}
- state = erts_smp_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&p->state);
- ASSERT(!p->scheduler_data);
-#ifndef ERTS_DIRTY_SCHEDULERS
- p->scheduler_data = esdp;
-#else /* ERTS_DIRTY_SCHEDULERS */
if (is_normal_sched) {
+ ASSERT(!p->scheduler_data);
+ p->scheduler_data = esdp;
if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
- & (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
+ & (!(state & ERTS_PSFLG_RUNNING_SYS))) {
/* Migrate to dirty scheduler... */
sunlock_sched_out_proc:
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (IS_TRACED(p))
+ trace_schedule_in(p, state);
goto sched_out_proc;
}
- p->scheduler_data = esdp;
}
else {
+ if (!(state & ERTS_PSFLGS_DIRTY_WORK)) {
+ /* Dirty work completed... */
+ goto sunlock_sched_out_proc;
+ }
if (state & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_EXITING)) {
/*
* IMPORTANT! We need to take care of
@@ -10049,104 +9737,102 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto sunlock_sched_out_proc;
}
- ASSERT((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
- || *p->i == (BeamInstr) em_call_nif);
-
ASSERT(rq == ERTS_DIRTY_CPU_RUNQ
? (state & (ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
: (rq == ERTS_DIRTY_IO_RUNQ
&& (state & ERTS_PSFLG_DIRTY_IO_PROC)));
}
-#endif
-
- if (state & ERTS_PSFLG_PENDING_EXIT) {
- erts_handle_pending_exit(p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- state = erts_smp_atomic32_read_nob(&p->state);
- }
-
-#endif /* ERTS_SMP */
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-
- /* Clear tracer if it has been removed */
- if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
- p, ERTS_PROC_LOCK_MAIN, &p->common)) {
-
- if (state & ERTS_PSFLG_EXITING) {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
- }
- else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
- ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
- }
- if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
- }
- }
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ if (IS_TRACED(p))
+ trace_schedule_in(p, state);
+
+ if (is_normal_sched) {
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+ if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
+ int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
+ & !(state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLGS_DIRTY_WORK)));
+ if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
+ int sig_reds;
+ /*
+ * If we have dirty work scheduled we allow
+ * usage of all reductions since we need to
+ * handle all signals before doing dirty
+ * work...
+ */
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ sig_reds = reds;
+ else
+ sig_reds = ERTS_SIG_HANDLE_REDS_MAX_PREFERED;
+ (void) erts_proc_sig_handle_incoming(p,
+ &state,
+ &sig_reds,
+ sig_reds,
+ local_only);
+ reds -= sig_reds;
+ }
+ }
+ if ((state & (ERTS_PSFLG_SYS_TASKS
+ | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_SYS_TASKS) {
+ /*
+ * GC is normally never delayed when a process
+ * is scheduled out, but might be when executing
+ * hand written beam assembly in
+ * prim_eval:'receive'. If GC is delayed we are
+ * not allowed to execute system tasks.
+ */
+ if (!(p->flags & F_DELAY_GC)) {
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
+ }
+ }
- if (state & (ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- /*
- * GC is normally never delayed when a process
- * is scheduled out, but might be when executing
- * hand written beam assembly in
- * prim_eval:'receive'. If GC is delayed we are
- * not allowed to execute system tasks.
- */
- if (!(p->flags & F_DELAY_GC)) {
- int cost = execute_sys_tasks(p, &state, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0
-#ifdef ERTS_DIRTY_SCHEDULERS
- || !is_normal_sched
- || (state & ERTS_PSFLGS_DIRTY_WORK)
-#endif
- ) {
- goto sched_out_proc;
- }
- }
+ if (reds <= 0 || (state & ERTS_PSFLGS_DIRTY_WORK))
+ goto sched_out_proc;
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
- while (1) {
- erts_aint32_t n, e;
+ while (1) {
+ erts_aint32_t n, e;
- if (((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING)) {
- goto sched_out_proc;
- }
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ & !(state & ERTS_PSFLG_EXITING)) {
+ goto sched_out_proc;
+ }
- n = e = state;
- n &= ~psflg_running_sys;
- n |= psflg_running;
+ n = e = state;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
- state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
- if (state == e) {
- state = n;
- break;
- }
+ state = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
- ASSERT(state & psflg_running_sys);
- ASSERT(!(state & psflg_running));
- }
- }
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
+ }
+ }
- if (ERTS_IS_GC_DESIRED(p) && !ERTS_SCHEDULER_IS_DIRTY_IO(esdp)) {
- if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
- int cost = scheduler_gc_proc(p, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0)
- goto sched_out_proc;
- }
- }
+ if (ERTS_IS_GC_DESIRED(p)) {
+ if (!(state & ERTS_PSFLG_EXITING)
+ && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
+ int cost = scheduler_gc_proc(p, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
+ goto sched_out_proc;
+ if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
+ goto sched_out_proc;
+ }
+ }
+ }
if (proxy_p) {
free_proxy_proc(proxy_p);
@@ -10154,19 +9840,24 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
p->fcalls = reds;
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ if (reds != context_reds) {
+ actual_reds = context_reds - reds - esdp->virtual_reds;
+ ASSERT(actual_reds >= 0);
+ esdp->virtual_reds = 0;
+ p->reds += actual_reds;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+ }
- /* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
ASSERT(erts_proc_read_refc(p) > 0);
if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) {
BeamInstr** pi;
-#ifdef ERTS_SMP
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
-#endif
pi = (BeamInstr **) p->def_arg_reg;
p->i = *pi;
p->flags &= ~F_INSLPQUEUE;
@@ -10174,14 +9865,110 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_PTMR_CLEAR(p);
}
+ /* if exiting, we *shall* exit... */
+ ASSERT(!(state & ERTS_PSFLG_EXITING)
+ || p->i == (BeamInstr *) beam_exit
+ || p->i == (BeamInstr *) beam_continue_exit);
+
+#ifdef DEBUG
+ if (is_normal_sched) {
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ ERTS_INTERNAL_ERROR("Executing dirty code on normal scheduler");
+ }
+ else {
+ if (!(state & ERTS_PSFLGS_DIRTY_WORK)) {
+ if (esdp->type == ERTS_SCHED_DIRTY_CPU)
+ ERTS_INTERNAL_ERROR("Executing normal code on dirty CPU scheduler");
+ else if (esdp->type == ERTS_SCHED_DIRTY_IO)
+ ERTS_INTERNAL_ERROR("Executing normal code on dirty IO scheduler");
+ else
+ ERTS_INTERNAL_ERROR("Executing normal code on dirty UNKNOWN scheduler");
+ }
+ }
+ {
+ erts_aint32_t dstate = erts_atomic32_read_nob(&p->state);
+
+ /* Never run a suspended process */
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
+ || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
+
+ /* Do not execute on the wrong type of scheduler... */
+ ASSERT(is_normal_sched
+ ? !(dstate & ERTS_PSFLGS_DIRTY_WORK)
+ : !!(dstate & ERTS_PSFLGS_DIRTY_WORK));
+ }
+#endif
+
return p;
+
+ sched_out_proc:
+ actual_reds = context_reds;
+ actual_reds -= reds;
+ actual_reds -= esdp->virtual_reds;
+ reds = actual_reds;
+ goto internal_sched_out_proc;
+
+ }
+}
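A worked example of the sched_out_proc accounting above, assuming CONTEXT_REDS = 4000: a process handed reds = 4000 on the way in that is scheduled out with reds = 1500 left, while esdp->virtual_reds = 200, is charged

    actual_reds = 4000 - 1500 - 200 = 2300

virtual_reds collects reductions bumped for scheduling purposes (the ERTS_VBUMP_* family) rather than for real work, so they are subtracted to avoid double-charging.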
+
+static void
+trace_schedule_in(Process *p, erts_aint32_t state)
+{
+ ASSERT(IS_TRACED(p));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+ /* Clear tracer if it has been removed */
+ if (erts_is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
+ if (state & ERTS_PSFLG_EXITING) {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
+ }
+ else {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
+ }
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+ }
+
+}
+
+static void
+trace_schedule_out(Process *p, erts_aint32_t state)
+{
+ ASSERT(IS_TRACED(p));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+
+ if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN,
+ ((state & ERTS_PSFLG_FREE)
+ ? am_out_exited
+ : am_out_exiting));
+ }
+ else {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
}
static int
-notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
+ Eterm st_result, int normal_sched)
{
- Process *rp = erts_proc_lookup(st->requester);
+ Process *rp;
+ if (!normal_sched)
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ st->requester, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ else
+ rp = erts_proc_lookup(st->requester);
if (rp) {
ErtsProcLocks rp_locks;
ErlOffHeap *ohp;
@@ -10222,13 +10009,17 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
+
+ if (!normal_sched)
+ erts_proc_dec_refc(rp);
}
erts_cleanup_offheap(&st->off_heap);
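The new normal_sched argument selects the lookup discipline: a normal scheduler may use plain erts_proc_lookup(), but a dirty scheduler does not take part in thread progress and so must pin the requester with ERTS_P2P_FLG_INC_REFC and drop the reference when done. A sketch of that acquire/release discipline (all names illustrative):

typedef struct proc { int refc; } Proc;

Proc *lookup_inc_refc(long pid);  /* stand-in: NULL if process is gone */
void  dec_refc(Proc *p);          /* stand-in: may trigger the free    */
void  deliver_reply(Proc *p);     /* stand-in */

static void notify(long pid)
{
    Proc *p = lookup_inc_refc(pid);
    if (p) {
        deliver_reply(p);   /* p cannot be freed while we hold a ref */
        dec_refc(p);
    }
}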
@@ -10247,7 +10038,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
*priop = -1; /* Shut up annoying erroneous warning */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (!c_p->sys_task_qs) {
qmask = 0;
@@ -10363,17 +10154,17 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
if (!qmask)
- n &= ~ERTS_PSFLG_ACTIVE_SYS;
+ n &= ~ERTS_PSFLG_SYS_TASKS;
if (a == n)
break;
- a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e);
+ a = erts_atomic32_cmpxchg_nob(&c_p->state, n, e);
} while (a != e);
}
done:
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
if (unused_qs)
proc_sys_task_queues_free(unused_qs);
@@ -10383,48 +10174,62 @@ done:
return st;
}
+
+static void exit_permanent_prio_elevation(Process *c_p, erts_aint32_t state);
static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
+static void save_dirty_task(Process *c_p, ErtsProcSysTask *st);
static int
execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
- int garbage_collected = 0;
+ int minor_gc = 0, major_gc = 0;
erts_aint32_t state = *statep;
int reds = in_reds;
int qmask = 0;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
+ ErtsProcSysTaskType type;
ErtsProcSysTask *st;
int st_prio;
Eterm st_res;
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
- erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
- ASSERT(ERTS_PROC_IS_EXITING(c_p));
+ if (state & ERTS_PSFLG_EXITING)
break;
- }
st = fetch_sys_task(c_p, state, &qmask, &st_prio);
if (!st)
break;
- switch (st->type) {
- case ERTS_PSTT_GC:
+ type = st->type;
+
+ switch (type) {
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
reds--;
}
else {
- if (!garbage_collected) {
- FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ if ((!minor_gc
+ || (!major_gc && type == ERTS_PSTT_GC_MAJOR))
+ && !(c_p->flags & F_HIBERNATED)) {
+ if (type == ERTS_PSTT_GC_MAJOR) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ }
reds -= scheduler_gc_proc(c_p, reds);
- garbage_collected = 1;
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
+ if (type == ERTS_PSTT_GC_MAJOR)
+ minor_gc = major_gc = 1;
+ else
+ minor_gc = 1;
}
st_res = am_true;
}
@@ -10438,7 +10243,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
fcalls = reds - CONTEXT_REDS;
st_res = erts_check_process_code(c_p,
st->arg[0],
- unsigned_val(st->arg[1]),
&cpc_reds,
fcalls);
reds -= cpc_reds;
@@ -10449,46 +10253,92 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
break;
}
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA: {
int fcalls;
int cla_reds = 0;
+ int do_gc;
+
if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
fcalls = reds;
else
fcalls = reds - CONTEXT_REDS;
- st_res = erts_proc_copy_literal_area(c_p,
- &cla_reds,
- fcalls,
- st->arg[0] == am_true);
+ do_gc = st->arg[0] == am_true;
+ st_res = erts_proc_copy_literal_area(c_p, &cla_reds,
+ fcalls, do_gc);
reds -= cla_reds;
if (is_non_value(st_res)) {
+ if (c_p->flags & F_DIRTY_CLA) {
+ save_dirty_task(c_p, st);
+ st = NULL;
+ break;
+ }
/* Needed gc, but gc was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
+ break;
}
+ if (do_gc) /* We did a major gc */
+ minor_gc = major_gc = 1;
break;
}
-#endif
case ERTS_PSTT_COHMQ:
reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
break;
-#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
st_res = am_true;
break;
-#endif
+ case ERTS_PSTT_ETS_FREE_FIXATION:
+ reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_PRIO_SIG: {
+ erts_aint32_t fail_state, state;
+ int local_only, sig_res, sig_reds = reds;
+ st_res = am_false;
+
+ if (st->arg[0] == am_true)
+ local_only = !0;
+ else
+ local_only = 0;
+
+ sig_reds = reds;
+ sig_res = erts_proc_sig_handle_incoming(c_p, &state, &sig_reds,
+ reds, local_only);
+ reds -= sig_reds;
+
+ if (state & ERTS_PSFLG_EXITING) {
+ exit_permanent_prio_elevation(c_p, state);
+ break;
+ }
+
+ if (sig_res)
+ break;
+
+ st->arg[0] = am_true;
+
+ fail_state = ERTS_PSFLG_EXITING;
+
+ if (schedule_process_sys_task(c_p, st_prio, st, &fail_state)) {
+ /* Successfully rescheduled task... */
+ st = NULL;
+ }
+ else {
+ state = erts_atomic32_read_nob(&c_p->state);
+ exit_permanent_prio_elevation(c_p, state);
+ }
+ break;
+ }
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
}
if (st)
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
- state = erts_smp_atomic32_read_acqb(&c_p->state);
+ state = erts_atomic32_read_acqb(&c_p->state);
} while (qmask && reds > 0);
*statep = state;
@@ -10505,51 +10355,175 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
erts_aint32_t state = in_state;
int max_reds = in_reds;
int reds = 0;
- int qmask = 0;
+ int qmask = 1; /* Set to 1 to force looping as long as there
+ * are dirty tasks.
+ */
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
do {
ErtsProcSysTask *st;
Eterm st_res;
int st_prio;
- st = fetch_sys_task(c_p, state, &qmask, &st_prio);
- if (!st)
- break;
+ if (c_p->dirty_sys_tasks) {
+ st = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st->next;
+ }
+ else
+ {
+ st = fetch_sys_task(c_p, state, &qmask, &st_prio);
+ if (!st)
+ break;
+ }
switch (st->type) {
- case ERTS_PSTT_GC:
+ case ERTS_PSTT_PRIO_SIG:
+ state = erts_atomic32_read_nob(&c_p->state);
+ exit_permanent_prio_elevation(c_p, state);
+ /* fall through... */
+ case ERTS_PSTT_GC_MAJOR:
+ case ERTS_PSTT_GC_MINOR:
case ERTS_PSTT_CPC:
- case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_ETS_FREE_FIXATION:
st_res = am_false;
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case ERTS_PSTT_CLA:
st_res = am_ok;
break;
-#endif
-#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
st_res = am_true;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
break;
}
- reds += notify_sys_task_executed(c_p, st, st_res);
+ reds += notify_sys_task_executed(c_p, st, st_res, 1);
- state = erts_smp_atomic32_read_acqb(&c_p->state);
+ state = erts_atomic32_read_acqb(&c_p->state);
} while (qmask && reds < max_reds);
return reds;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
+static void
+exit_permanent_prio_elevation(Process *c_p, erts_aint32_t state)
+{
+ erts_aint32_t a;
+ /*
+ * we are about to terminate; permanently elevate
+ * prio in order to ensure high prio signal
+ * handling...
+ */
+ a = state;
+ while (1) {
+ erts_aint32_t aprio, uprio, n, e;
+ ASSERT(a & ERTS_PSFLG_EXITING);
+ aprio = ERTS_PSFLGS_GET_ACT_PRIO(a);
+ uprio = ERTS_PSFLGS_GET_USR_PRIO(a);
+ if (aprio >= uprio)
+ break; /* user prio >= actual prio */
+ /*
+ * actual prio is higher than user prio; raise
+ * user prio to actual prio...
+ */
+ n = e = a;
+ n &= ~ERTS_PSFLGS_USR_PRIO_MASK;
+ n |= aprio << ERTS_PSFLGS_USR_PRIO_OFFSET;
+ a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break;
+ }
+}
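The loop above works on the two 2-bit priority fields packed into the process state word; since a lower numeric value means a higher priority, it copies the actual priority into the user field whenever the actual one is numerically smaller. A standalone sketch of the field handling (offsets and masks are assumptions, not the real ERTS_PSFLGS_* values):

#define PRIO_MASK       0x3u
#define USR_PRIO_SHIFT  0
#define ACT_PRIO_SHIFT  2

/* Lower value = higher priority, as in the 'low value -> high prio'
 * comment earlier in this file. */
static unsigned elevate_usr_prio(unsigned state)
{
    unsigned usr = (state >> USR_PRIO_SHIFT) & PRIO_MASK;
    unsigned act = (state >> ACT_PRIO_SHIFT) & PRIO_MASK;
    if (act >= usr)
        return state;                /* nothing to raise */
    state &= ~(PRIO_MASK << USR_PRIO_SHIFT);
    state |= act << USR_PRIO_SHIFT;  /* user prio := actual prio */
    return state;
}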
+
+void
+erts_execute_dirty_system_task(Process *c_p)
+{
+ Eterm cla_res = THE_NON_VALUE;
+ ErtsProcSysTask *stasks;
+
+ /*
+ * If multiple operations are pending, perform them in the
+ * following order (in order to avoid unnecessary GC):
+ * 1. Copy Literal Area (implies major GC).
+ * 2. GC Hibernate (implies major GC if not woken).
+ * 3. Major GC (implies minor GC).
+ * 4. Minor GC.
+ *
+ * System task requests are handled after the actual
+ * operations have been performed...
+ */
+
+ ASSERT(!(c_p->flags & (F_DELAY_GC|F_DISABLE_GC)));
+
+ if (c_p->flags & F_DIRTY_CLA) {
+ int cla_reds = 0;
+ cla_res = erts_proc_copy_literal_area(c_p, &cla_reds, c_p->fcalls, 1);
+ ASSERT(is_value(cla_res));
+ }
+
+ if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ if (erts_proc_sig_fetch(c_p))
+ c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
+ else {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->fvalue = NIL;
+ erts_garbage_collect_hibernate(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
+ if (c_p->flags & F_DIRTY_MAJOR_GC)
+ c_p->flags |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg,
+ c_p->arity, c_p->fcalls);
+ }
+
+ ASSERT(!(c_p->flags & (F_DIRTY_CLA
+ | F_DIRTY_GC_HIBERNATE
+ | F_DIRTY_MAJOR_GC
+ | F_DIRTY_MINOR_GC)));
+
+ stasks = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = NULL;
+
+ while (stasks) {
+ Eterm st_res;
+ ErtsProcSysTask *st = stasks;
+ stasks = st->next;
+
+ switch (st->type) {
+ case ERTS_PSTT_CLA:
+ ASSERT(is_value(cla_res));
+ st_res = cla_res;
+ break;
+ case ERTS_PSTT_GC_MAJOR:
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_GC_MINOR:
+ st_res = am_true;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Not supported dirty system task");
+ break;
+ }
+
+ (void) notify_sys_task_executed(c_p, st, st_res, 0);
+
+ }
+
+ erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+}
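Together with save_dirty_task() further down, the drain loop above treats p->dirty_sys_tasks as an intrusive LIFO: push links the task in front, and the drain detaches the whole chain before walking it, so tasks saved while one is running are left for the next pass. The pattern as a standalone sketch (illustrative names):

struct task { struct task *next; /* intrusive link */ };

static void push(struct task **head, struct task *t)
{
    t->next = *head;
    *head = t;
}

static void drain(struct task **head, void (*run)(struct task *))
{
    struct task *t = *head;
    *head = NULL;                /* detach first, then walk */
    while (t) {
        struct task *next = t->next;
        run(t);                  /* may push new tasks onto *head */
        t = next;
    }
}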
static BIF_RETTYPE
dispatch_system_task(Process *c_p, erts_aint_t fail_state,
@@ -10566,7 +10540,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
switch (st->type) {
case ERTS_PSTT_CPC:
- rp = erts_dirty_process_code_checker;
+ rp = erts_dirty_process_signal_handler;
ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS));
if (c_p == rp) {
@@ -10598,15 +10572,14 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
msg = copy_struct(operation, osz, &hp, ohp);
msg = TUPLE4(hp, st->requester, target, prio, msg);
- erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
return ret;
}
-#endif
static BIF_RETTYPE
request_system_task(Process *c_p, Eterm requester, Eterm target,
@@ -10652,7 +10625,7 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
goto badarg;
req_type = tp[1];
req_id = tp[2];
- req_id_sz = is_immed(req_id) ? req_id : size_object(req_id);
+ req_id_sz = is_immed(req_id) ? 0 : size_object(req_id);
tot_sz = req_id_sz;
for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) {
int tix = 3 + i;
@@ -10694,22 +10667,23 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
switch (req_type) {
case am_garbage_collect:
- st->type = ERTS_PSTT_GC;
- noproc_res = am_false;
- if (!rp)
+ switch (st->arg[0]) {
+ case am_minor: st->type = ERTS_PSTT_GC_MINOR; break;
+ case am_major: st->type = ERTS_PSTT_GC_MAJOR; break;
+ default: goto badarg;
+ }
+ noproc_res = am_false;
+ if (!rp)
goto noproc;
break;
case am_check_process_code:
if (is_not_atom(st->arg[0]))
goto badarg;
- if (is_not_small(st->arg[1]) || (unsigned_val(st->arg[1]) & ~ERTS_CPC_ALL))
- goto badarg;
noproc_res = am_false;
st->type = ERTS_PSTT_CPC;
if (!rp)
goto noproc;
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* If the process should start executing dirty
* code it is important that this task is
@@ -10717,10 +10691,8 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
*/
fail_state |= (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS);
-#endif
break;
-#ifdef ERTS_NEW_PURGE_STRATEGY
case am_copy_literals:
if (st->arg[0] != am_true && st->arg[0] != am_false)
goto badarg;
@@ -10729,7 +10701,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
if (!rp)
goto noproc;
break;
-#endif
default:
goto badarg;
@@ -10741,19 +10712,17 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
noproc:
failure = noproc_res;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
ret = dispatch_system_task(c_p, fail_state, st,
target, priority, operation);
goto cleanup_return;
}
-#endif
else {
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()");
failure = am_internal_error;
}
- notify_sys_task_executed(c_p, st, failure);
+ notify_sys_task_executed(c_p, st, failure, 1);
}
ERTS_BIF_PREP_RET(ret, am_ok);
@@ -10764,9 +10733,7 @@ badarg:
ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
-#ifdef ERTS_DIRTY_SCHEDULERS
cleanup_return:
-#endif
if (st) {
erts_cleanup_offheap(&st->off_heap);
@@ -10790,14 +10757,15 @@ erts_internal_request_system_task_4(BIF_ALIST_4)
BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
-static void
-erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
+static int
+schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type,
+ int prio, Eterm arg0, Eterm arg1)
{
- Process *rp = erts_proc_lookup(pid);
+ int res = 0;
+ Process *rp = erts_proc_lookup_raw(pid);
if (rp) {
ErtsProcSysTask *st;
- erts_aint32_t state, fail_state;
- int i;
+ erts_aint32_t st_prio, fail_state;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
@@ -10806,27 +10774,46 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
st->reply_tag = NIL;
st->req_id = NIL;
st->req_id_sz = 0;
- for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
- st->arg[i] = NIL;
+ st->arg[0] = arg0;
+ st->arg[1] = arg1;
ERTS_INIT_OFF_HEAP(&st->off_heap);
- state = erts_smp_atomic32_read_nob(&rp->state);
-
- fail_state = ERTS_PSFLG_EXITING;
- if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state),
- st, &fail_state))
+ if (prio >= 0) {
+ st_prio = (erts_aint32_t) prio;
+ fail_state = ERTS_PSFLG_FREE;
+ }
+ else {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ st_prio = ERTS_PSFLGS_GET_USR_PRIO(state);
+ fail_state = ERTS_PSFLG_EXITING;
+ }
+ res = schedule_process_sys_task(rp, st_prio, st, &fail_state);
+ if (!res)
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
+ return res;
}
-
void
erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
{
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
+ schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ,
+ -1, NIL, NIL);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
+void
+erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
+{
+ schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION,
+ -1, (Eterm) fix, NIL);
+}
+
+int
+erts_sig_prio(Eterm pid, int prio)
+{
+ return schedule_generic_sys_task(pid, ERTS_PSTT_PRIO_SIG,
+ prio, am_false, NIL);
+}
static void
flush_dirty_trace_messages(void *vpid)
@@ -10840,46 +10827,38 @@ flush_dirty_trace_messages(void *vpid)
erts_free(ERTS_ALC_T_DIRTY_SL, vpid);
#endif
- proc = erts_proc_lookup(pid);
- if (proc)
- (void) erts_flush_trace_messages(proc, 0);
+ proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0);
+ if (proc) {
+ (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ }
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
void
erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
{
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl;
-#endif
Eterm pid = proc->common.id;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_aint32_t state;
if (!force_on_proc) {
- state = erts_smp_atomic32_read_nob(&proc->state);
+ state = erts_atomic32_read_nob(&proc->state);
if (state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
goto sched_flush_dirty;
}
}
-#endif
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+ schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, -1, NIL, NIL);
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!force_on_proc) {
- state = erts_smp_atomic32_read_mb(&proc->state);
+ state = erts_atomic32_read_mb(&proc->state);
if (state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
void *vargp;
@@ -10907,7 +10886,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp);
}
}
-#endif
}
static void
@@ -10916,7 +10894,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
erts_aint32_t state;
ErtsProcSysTaskQs *qs;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p);
if (!qs) {
@@ -10946,7 +10924,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
}
- state = erts_smp_atomic32_read_nob(&c_p->state);
+ state = erts_atomic32_read_nob(&c_p->state);
ASSERT((ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_DIRTY_RUNNING
@@ -10962,20 +10940,27 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET;
}
- state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e);
+ state = erts_atomic32_cmpxchg_relb(&c_p->state, n, e);
if (state == e)
break;
}
}
+static void
+save_dirty_task(Process *c_p, ErtsProcSysTask *st)
+{
+ st->next = c_p->dirty_sys_tasks;
+ c_p->dirty_sys_tasks = st;
+}
+
int
erts_set_gc_state(Process *c_p, int enable)
{
ErtsProcSysTaskQs *dgc_tsk_qs;
ASSERT(c_p == erts_get_current_process());
ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&c_p->state));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ & erts_atomic32_read_nob(&c_p->state));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
if (!enable) {
c_p->flags |= F_DISABLE_GC;
@@ -10990,7 +10975,7 @@ erts_set_gc_state(Process *c_p, int enable)
/* Move delayed gc tasks into sys tasks queues. */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (!c_p->sys_task_qs) {
c_p->sys_task_qs = dgc_tsk_qs;
@@ -11063,10 +11048,12 @@ erts_set_gc_state(Process *c_p, int enable)
erts_aint32_t aprio, state =
#endif
- erts_smp_atomic32_read_bset_nob(&c_p->state,
- (ERTS_PSFLG_DELAYED_SYS
- | ERTS_PSFLG_ACTIVE_SYS),
- ERTS_PSFLG_ACTIVE_SYS);
+ erts_atomic32_read_bset_nob(&c_p->state,
+ (ERTS_PSFLG_DELAYED_SYS
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SYS_TASKS),
+ (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SYS_TASKS));
#ifdef DEBUG
ASSERT(state & ERTS_PSFLG_DELAYED_SYS);
@@ -11077,7 +11064,7 @@ erts_set_gc_state(Process *c_p, int enable)
}
#endif
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
(void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, NULL);
@@ -11093,24 +11080,24 @@ erts_sched_stat_modify(int what)
int ix;
switch (what) {
case ERTS_SCHED_STAT_MODIFY_ENABLE:
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_sched_stat.enabled = 1;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
erts_sched_stat.enabled = 0;
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
erts_sched_stat.prio[ix].total_executed = 0;
erts_sched_stat.prio[ix].executed = 0;
erts_sched_stat.prio[ix].total_migrated = 0;
erts_sched_stat.prio[ix].migrated = 0;
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
break;
}
}
@@ -11124,7 +11111,7 @@ erts_sched_stat_term(Process *p, int total)
Uint executed[ERTS_NO_PRIO_LEVELS];
Uint migrated[ERTS_NO_PRIO_LEVELS];
- erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_spin_lock(&erts_sched_stat.lock);
if (total) {
int i;
for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) {
@@ -11143,7 +11130,7 @@ erts_sched_stat_term(Process *p, int total)
erts_sched_stat.prio[i].migrated = 0;
}
}
- erts_smp_spin_unlock(&erts_sched_stat.lock);
+ erts_spin_unlock(&erts_sched_stat.lock);
sz = 0;
(void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS,
@@ -11163,7 +11150,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
-#ifdef ERTS_SMP
ErtsMigrationPaths *mpaths = erts_get_migration_paths();
if (!mpaths)
@@ -11173,9 +11159,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
if (erq)
rq = erq;
}
-#endif
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
molp->next = NULL;
molp->func = func;
@@ -11186,11 +11171,11 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
rq->misc.start = molp;
rq->misc.end = molp;
-#ifdef ERTS_SMP
non_empty_runq(rq);
-#endif
- erts_smp_runq_unlock(rq);
+ ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
+ erts_runq_unlock(rq);
smp_notify_inc_runq(rq);
}
@@ -11220,7 +11205,10 @@ exec_misc_ops(ErtsRunQueue *rq)
rq->misc.end = NULL;
}
- erts_smp_runq_unlock(rq);
+ if (!rq->misc.start)
+ ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
+
+ erts_runq_unlock(rq);
while (molp) {
tmp_molp = molp;
@@ -11229,7 +11217,7 @@ exec_misc_ops(ErtsRunQueue *rq)
misc_op_list_free(tmp_molp);
}
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
}
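The new ERTS_RUNQ_FLG_MISC_OP flag mirrors "misc queue non-empty" so the scheduler loop can test a flags word it already reads instead of taking the run-queue lock; the enqueue side sets it under the lock, and exec_misc_ops() clears it only once the queue is empty. A simplified sketch of the discipline (illustrative names; the real code drains in bounded batches and uses atomic flag operations):

typedef struct op { struct op *next; void (*func)(void *); void *arg; } Op;
typedef struct runq { Op *misc_start, *misc_end; unsigned flags; } RunQ;

#define MISC_OP_FLG 1u

void lock_runq(RunQ *rq);        /* illustrative stand-ins */
void unlock_runq(RunQ *rq);

static void enqueue_misc(RunQ *rq, Op *op)
{
    lock_runq(rq);
    op->next = NULL;
    if (rq->misc_end) rq->misc_end->next = op;
    else              rq->misc_start = op;
    rq->misc_end = op;
    rq->flags |= MISC_OP_FLG;    /* readers may test without the lock */
    unlock_runq(rq);
}

static void exec_misc(RunQ *rq)
{
    Op *batch;
    lock_runq(rq);
    batch = rq->misc_start;          /* take everything in one batch */
    rq->misc_start = rq->misc_end = NULL;
    rq->flags &= ~MISC_OP_FLG;       /* queue drained: clear the flag */
    unlock_runq(rq);
    while (batch) {
        Op *next = batch->next;
        batch->func(batch->arg);     /* run outside the lock */
        batch = next;
    }
}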
Uint
@@ -11246,6 +11234,8 @@ erts_get_total_reductions(Uint *redsp, Uint *diffp)
Uint reds = 0;
ERTS_ATOMIC_FOREACH_RUNQ_X(rq,
+ erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS,
+
reds += rq->procs.reductions,
if (redsp) *redsp = reds;
@@ -11258,12 +11248,12 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
{
Uint reds = erts_current_reductions(c_p, c_p);
int ix;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
/*
* Wait for other schedulers to schedule out their processes
* and update 'reductions'.
*/
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++)
reds += ERTS_RUNQ_IX(ix)->procs.reductions;
if (redsp)
@@ -11271,8 +11261,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
if (diffp)
*diffp = reds - last_exact_reductions;
last_exact_reductions = reds;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
static void delete_process(Process* p);
@@ -11280,10 +11270,8 @@ static void delete_process(Process* p);
void
erts_free_proc(Process *p)
{
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
- ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
+ ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
ASSERT(0 == erts_proc_read_refc(p));
if (p->flags & F_DELAYED_DEL_PROC)
delete_process(p);
@@ -11294,6 +11282,7 @@ typedef struct {
Process *proc;
erts_aint32_t state;
ErtsRunQueue *run_queue;
+ int bound;
} ErtsEarlyProcInit;
static void early_init_process_struct(void *varg, Eterm data)
@@ -11302,16 +11291,12 @@ static void early_init_process_struct(void *varg, Eterm data)
Process *proc = arg->proc;
proc->common.id = make_internal_pid(data);
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_init_nob(&proc->dirty_state, 0);
-#endif
- erts_smp_atomic32_init_relb(&proc->state, arg->state);
-
-#ifdef ERTS_SMP
- RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
+ erts_atomic32_init_nob(&proc->dirty_state, 0);
+ proc->dirty_sys_tasks = NULL;
+ erts_init_runq_proc(proc, arg->run_queue, arg->bound);
+ erts_atomic32_init_relb(&proc->state, arg->state);
erts_proc_lock_init(proc); /* All locks locked */
-#endif
}
@@ -11319,7 +11304,7 @@ static void early_init_process_struct(void *varg, Eterm data)
** Allocate process and find out where to place next process.
*/
static Process*
-alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
+alloc_process(ErtsRunQueue *rq, int bound, erts_aint32_t state)
{
ErtsEarlyProcInit init_arg;
Process *p;
@@ -11328,9 +11313,12 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
if (!p)
return NULL;
+ ASSERT(rq);
+
init_arg.proc = (Process *) p;
- init_arg.run_queue = rq;
init_arg.state = state;
+ init_arg.run_queue = rq;
+ init_arg.bound = bound;
ERTS_CT_ASSERT(offsetof(Process,common) == 0);
@@ -11346,7 +11334,6 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
- p->approx_started = erts_get_approx_time();
p->rcount = 0;
p->heap = NULL;
@@ -11365,6 +11352,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
+ int bound = 0;
Uint flags = 0;
ErtsRunQueue *rq = NULL;
Process *p;
@@ -11379,9 +11367,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
+#else
+ erts_literal_area_t litarea;
+ INITIALIZE_LITERAL_PURGE_AREA(litarea);
#endif
- erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
/*
* Check for errors.
@@ -11398,7 +11389,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
ASSERT(0 <= ix && ix < erts_no_run_queues);
rq = ERTS_RUNQ_IX(ix);
/* Unsupported feature... */
- state |= ERTS_PSFLG_BOUND;
+ bound = !0;
}
prio = (erts_aint32_t) so->priority;
}
@@ -11411,17 +11402,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
flags |= F_OFF_HEAP_MSGQ;
}
else if (so->flags & SPO_ON_HEAP_MSGQ) {
- state |= ERTS_PSFLG_ON_HEAP_MSGQ;
flags |= F_ON_HEAP_MSGQ;
}
ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ));
if (!rq)
- rq = erts_get_runq_proc(parent);
+ rq = erts_get_runq_proc(parent, NULL);
- p = alloc_process(rq, state); /* All proc locks are locked by this thread
- on success */
+ p = alloc_process(rq, bound, state); /* All proc locks are locked by this thread
+ on success */
if (!p) {
erts_send_error_to_logger_str(parent->group_leader,
"Too many processes\n");
@@ -11429,15 +11419,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
- ASSERT((erts_smp_atomic32_read_nob(&p->state)
- & ERTS_PSFLG_ON_HEAP_MSGQ)
- || (erts_smp_atomic32_read_nob(&p->state)
- & ERTS_PSFLG_OFF_HEAP_MSGQ));
-
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
- arg_size = size_object(args);
+ arg_size = size_object_litopt(args, &litarea);
#endif
heap_need = arg_size;
@@ -11457,14 +11442,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_vheap_size = BIN_VH_MIN_SIZE;
MAX_HEAP_SIZE_SET(p, H_MAX_SIZE);
MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS);
- p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ p->max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs);
}
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
-
- p->u.initial[INITIAL_MOD] = mod;
- p->u.initial[INITIAL_FUN] = func;
- p->u.initial[INITIAL_ARI] = (Uint) arity;
+
+ p->u.initial.module = mod;
+ p->u.initial.function = func;
+ p->u.initial.arity = (Uint) arity;
/*
* Must initialize binary lists here before copying binaries to process.
@@ -11483,9 +11468,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
- hipe_init_process_smp(&p->hipe_smp);
-#endif
#endif
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
@@ -11506,7 +11488,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/* No need to initialize p->fcalls. */
- p->current = p->u.initial+INITIAL_MOD;
+ p->current = &p->u.initial;
p->i = (BeamInstr *) beam_apply;
p->cp = (BeamInstr *) beam_apply+1;
@@ -11519,7 +11501,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
DESTROY_SHCOPY(info);
#else
- p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+ p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea);
#endif
p->arity = 3;
@@ -11533,8 +11515,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->common.u.alive.reg = NULL;
ERTS_P_LINKS(p) = NULL;
ERTS_P_MONITORS(p) = NULL;
- p->nodes_monitors = NULL;
- p->suspend_monitors = NULL;
+ ERTS_P_LT_MONITORS(p) = NULL;
ASSERT(is_pid(parent->group_leader));
@@ -11550,23 +11531,28 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_get_default_proc_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER(p));
- p->msg.first = NULL;
- p->msg.last = &p->msg.first;
- p->msg.save = &p->msg.first;
- p->msg.len = 0;
-#ifdef ERTS_SMP
- p->msg_inq.first = NULL;
- p->msg_inq.last = &p->msg_inq.first;
- p->msg_inq.len = 0;
+ p->sig_qs.first = NULL;
+ p->sig_qs.last = &p->sig_qs.first;
+ p->sig_qs.cont = NULL;
+ p->sig_qs.cont_last = &p->sig_qs.cont;
+ p->sig_qs.save = &p->sig_qs.first;
+ p->sig_qs.saved_last = NULL;
+ p->sig_qs.len = 0;
+ p->sig_qs.nmsigs.next = NULL;
+ p->sig_qs.nmsigs.last = NULL;
+ p->sig_inq.first = NULL;
+ p->sig_inq.last = &p->sig_inq.first;
+ p->sig_inq.len = 0;
+ p->sig_inq.nmsigs.next = NULL;
+ p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
#endif
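The two queues initialized above are the usual MPSC mailbox split: senders append signals to sig_inq under the message-queue lock, while the owning process periodically splices sig_inq onto its private sig_qs and then consumes signals without any lock. A reduced sketch of the two operations (illustrative names; both *_last fields are assumed initialized to the address of their *_first field):

typedef struct sig { struct sig *next; } Sig;

typedef struct {
    Sig *inq_first,  **inq_last;   /* shared: senders append here   */
    Sig *priv_first, **priv_last;  /* private to the owning process */
} Mailbox;

void lock_msgq(Mailbox *mb);       /* illustrative stand-ins */
void unlock_msgq(Mailbox *mb);

static void send_sig(Mailbox *mb, Sig *s)  /* any sender */
{
    lock_msgq(mb);
    s->next = NULL;
    *mb->inq_last = s;
    mb->inq_last = &s->next;
    unlock_msgq(mb);
}

static void fetch_sigs(Mailbox *mb)        /* owner only */
{
    lock_msgq(mb);
    if (mb->inq_first) {
        *mb->priv_last = mb->inq_first;    /* splice shared -> private */
        mb->priv_last = mb->inq_last;
        mb->inq_first = NULL;
        mb->inq_last = &mb->inq_first;
    }
    unlock_msgq(mb);
}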
p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
-#endif
p->mbuf = NULL;
p->msg_frag = NULL;
p->mbuf_sz = 0;
- erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
+ erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
p->dictionary = NULL;
p->seq_trace_lastcnt = 0;
p->seq_trace_clock = 0;
@@ -11584,14 +11570,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->last_old_htop = NULL;
#endif
-#ifdef ERTS_SMP
p->trace_msg_q = NULL;
p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
-#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -11619,8 +11599,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
}
if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args);
if (so->flags & SPO_LINK)
trace_proc(parent, locks, parent, am_link, p->common.id);
@@ -11632,8 +11612,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
== (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) {
/* This happens when parent was not traced, but child is */
locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
}
trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args);
if (so->flags & SPO_LINK)
@@ -11645,34 +11625,37 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
*/
if (so->flags & SPO_LINK) {
-#ifdef DEBUG
- int ret;
-#endif
-#ifdef DEBUG
- ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
- ASSERT(ret == 0);
- ret = erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
- ASSERT(ret == 0);
-#else
- erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
- erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
-#endif
-
+ ErtsLink *lnk;
+ ErtsLinkData *ldp = erts_link_create(ERTS_LNK_TYPE_PROC,
+ parent->common.id,
+ p->common.id);
+ lnk = erts_link_tree_lookup_insert(&ERTS_P_LINKS(parent), &ldp->a);
+ if (lnk) {
+ /*
+ * This should more or less never happen, but could
+ * potentially happen if pids wrap...
+ */
+ erts_link_release(lnk);
+ }
+ erts_link_tree_insert(&ERTS_P_LINKS(p), &ldp->b);
}
/*
* Test whether this process should be initially monitored by its parent.
*/
if (so->flags & SPO_MONITOR) {
- Eterm mref;
-
- mref = erts_make_ref(parent);
- erts_add_monitor(&ERTS_P_MONITORS(parent), MON_ORIGIN, mref, p->common.id, NIL);
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_TARGET, mref, parent->common.id, NIL);
+ Eterm mref = erts_make_ref(parent);
+ ErtsMonitorData *mdp = erts_monitor_create(ERTS_MON_TYPE_PROC,
+ mref,
+ parent->common.id,
+ p->common.id,
+ NIL);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(parent), &mdp->origin);
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(p), &mdp->target);
so->mref = mref;
}
- erts_smp_proc_unlock(p, locks);
+ erts_proc_unlock(p, locks);
res = p->common.id;
@@ -11680,7 +11663,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
schedule_process(p, state, 0);
@@ -11688,18 +11671,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
+ ErtsCodeMFA cmfa = {mod, func, arity};
DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa_buf, DTRACE_TERM_BUF_SIZE);
- dtrace_fun_decode(p, mod, func, arity, process_name, mfa);
- DTRACE2(process_spawn, process_name, mfa);
+ dtrace_fun_decode(p, &cmfa, process_name, mfa_buf);
+ DTRACE2(process_spawn, process_name, mfa_buf);
}
#endif
return res;
error:
- erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
return res;
}
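The signal-queue initialization in erl_create_process above (sig_qs/sig_inq) uses the intrusive FIFO idiom in which `last` stores the address of the next-pointer to fill, so enqueue takes the same path whether the queue is empty or not. A minimal standalone sketch of that shape, with hypothetical names, not code from this diff:

#include <stdio.h>
#include <stdlib.h>

typedef struct Msg { struct Msg *next; int val; } Msg;
typedef struct { Msg *first; Msg **last; } Queue;

static void q_init(Queue *q) { q->first = NULL; q->last = &q->first; }

static void q_enqueue(Queue *q, Msg *m)
{
    m->next = NULL;
    *q->last = m;        /* works for empty and non-empty queues alike */
    q->last = &m->next;  /* 'last' now addresses the new tail's next field */
}

int main(void)
{
    Queue q;
    q_init(&q);
    for (int i = 0; i < 3; i++) {
        Msg *m = malloc(sizeof *m);
        m->val = i;
        q_enqueue(&q, m);
    }
    for (Msg *m = q.first; m; ) {
        Msg *next = m->next;
        printf("%d\n", m->val);
        free(m);
        m = next;
    }
    return 0;
}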
@@ -11750,26 +11734,35 @@ void erts_init_empty_process(Process *p)
p->mbuf = NULL;
p->msg_frag = NULL;
p->mbuf_sz = 0;
- erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
+ erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
ERTS_P_MONITORS(p) = NULL;
+ ERTS_P_LT_MONITORS(p) = NULL;
ERTS_P_LINKS(p) = NULL; /* List of links */
- p->nodes_monitors = NULL;
- p->suspend_monitors = NULL;
- p->msg.first = NULL;
- p->msg.last = &p->msg.first;
- p->msg.save = &p->msg.first;
- p->msg.len = 0;
- p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
+ p->sig_qs.first = NULL;
+ p->sig_qs.last = &p->sig_qs.first;
+ p->sig_qs.cont = NULL;
+ p->sig_qs.cont_last = &p->sig_qs.cont;
+ p->sig_qs.save = &p->sig_qs.first;
+ p->sig_qs.saved_last = NULL;
+ p->sig_qs.len = 0;
+ p->sig_qs.nmsigs.next = NULL;
+ p->sig_qs.nmsigs.last = NULL;
+ p->sig_inq.first = NULL;
+ p->sig_inq.last = &p->sig_inq.first;
+ p->sig_inq.len = 0;
+ p->sig_inq.nmsigs.next = NULL;
+ p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
#endif
+ p->bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
p->seq_trace_token = NIL;
- p->u.initial[0] = 0;
- p->u.initial[1] = 0;
- p->u.initial[2] = 0;
+ p->u.initial.module = 0;
+ p->u.initial.function = 0;
+ p->u.initial.arity = 0;
p->catches = 0;
p->cp = NULL;
p->i = NULL;
@@ -11789,16 +11782,12 @@ void erts_init_empty_process(Process *p)
p->def_arg_reg[5] = 0;
p->parent = NIL;
- p->approx_started = 0;
p->static_flags = 0;
p->common.u.alive.started_interval = 0;
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
- hipe_init_process_smp(&p->hipe_smp);
-#endif
#endif
INIT_HOLE_CHECK(p);
@@ -11806,24 +11795,14 @@ void erts_init_empty_process(Process *p)
p->last_old_htop = NULL;
#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_init_nob(&p->dirty_state, 0);
-#endif
- erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
+ erts_atomic32_init_nob(&p->dirty_state, 0);
+ p->dirty_sys_tasks = NULL;
+ erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
-#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->msg_inq.first = NULL;
- p->msg_inq.last = &p->msg_inq.first;
- p->msg_inq.len = 0;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
-#endif
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -11856,15 +11835,11 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->old_heap == NULL);
ASSERT(ERTS_P_MONITORS(p) == NULL);
+ ASSERT(ERTS_P_LT_MONITORS(p) == NULL);
ASSERT(ERTS_P_LINKS(p) == NULL);
- ASSERT(p->nodes_monitors == NULL);
- ASSERT(p->suspend_monitors == NULL);
- ASSERT(p->msg.first == NULL);
- ASSERT(p->msg.len == 0);
+ ASSERT(p->sig_qs.first == NULL);
+ ASSERT(p->sig_qs.len == 0);
ASSERT(p->bif_timers == NULL);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ASSERT(p->accessor_bif_timers == NULL);
-#endif
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -11873,14 +11848,8 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
-#ifdef ERTS_SMP
- ASSERT(p->msg_inq.first == NULL);
- ASSERT(p->msg_inq.len == 0);
- ASSERT(p->suspendee == NIL);
- ASSERT(p->pending_suspenders == NULL);
- ASSERT(p->pending_exit.reason == THE_NON_VALUE);
- ASSERT(p->pending_exit.bp == NULL);
-#endif
+ ASSERT(p->sig_inq.first == NULL);
+ ASSERT(p->sig_inq.len == 0);
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -11905,9 +11874,7 @@ erts_cleanup_empty_process(Process* p)
free_message_buffer(p->mbuf);
p->mbuf = NULL;
}
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
#ifdef DEBUG
erts_debug_verify_clean_empty_process(p);
#endif
@@ -11916,11 +11883,9 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- Eterm *heap;
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
- void *nif_export;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
@@ -11937,16 +11902,14 @@ delete_process(Process* p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
- nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
- if (nif_export)
- erts_destroy_nif_export(nif_export);
+ erts_destroy_nif_export(p);
/* Cleanup psd */
- psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
if (psd) {
- erts_smp_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */
+ erts_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */
erts_free(ERTS_ALC_T_PSD, psd);
}
@@ -11971,13 +11934,8 @@ delete_process(Process* p)
hipe_delete_process(&p->hipe);
#endif
- heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
-
-#ifdef DEBUG
- sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
+ erts_deallocate_young_generation(p);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11989,807 +11947,316 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
- if (p->msg_frag)
- erts_cleanup_messages(p->msg_frag);
-
erts_erase_dicts(p);
/* free all pending messages */
- erts_cleanup_messages(p->msg.first);
- p->msg.first = NULL;
-
- ASSERT(!p->nodes_monitors);
- ASSERT(!p->suspend_monitors);
+ erts_cleanup_messages(p->sig_qs.first);
+ p->sig_qs.first = NULL;
+ erts_cleanup_messages(p->sig_qs.cont);
+ p->sig_qs.cont = NULL;
p->fvalue = NIL;
}
static ERTS_INLINE void
-set_proc_exiting(Process *p,
- erts_aint32_t in_state,
- Eterm reason,
- ErlHeapFragment *bp)
+set_self_exiting(Process *c_p, Eterm reason, int *enqueue,
+ erts_aint32_t *prio, erts_aint32_t *state)
{
- erts_aint32_t state = in_state, enq_prio = -1;
- int enqueue;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
+ erts_aint32_t st, enq_prio = -1;
+ int enq;
- enqueue = change_proc_schedule_state(p,
- (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLGS_DIRTY_WORK),
- ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
- &state,
- &enq_prio,
- ERTS_PROC_LOCKS_ALL);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
- p->fvalue = reason;
- if (bp)
- erts_link_mbuf_to_proc(p, bp);
- /*
- * We used to set freason to EXC_EXIT here, but there is no need to
- * save the stack trace since this process irreversibly is going to
- * exit.
- */
- p->freason = EXTAG_EXIT;
- KILL_CATCHES(p);
- p->i = (BeamInstr *) beam_exit;
+ c_p->fvalue = reason;
-#ifndef ERTS_SMP
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- && !(state & ERTS_PSFLG_GC)) {
- /*
- * I non smp case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we want schedule out the
- * process as quickly as possible in order to detect
- * the event as fast as possible.
- */
- ERTS_VBUMP_ALL_REDS(p);
- }
-#endif
+ st = erts_atomic32_read_nob(&c_p->state);
+ ASSERT(enqueue || (st & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
- add2runq(enqueue, enq_prio, p, state, NULL);
-}
+ enq = change_proc_schedule_state(c_p,
+ (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLGS_DIRTY_WORK),
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &st,
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
-static ERTS_INLINE erts_aint32_t
-set_proc_self_exiting(Process *c_p)
-{
-#ifdef DEBUG
- int enqueue;
-#endif
- erts_aint32_t state, enq_prio = -1;
-
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
-
- state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT(state & (ERTS_PSFLG_RUNNING
- |ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS));
-
-#ifdef DEBUG
- enqueue =
-#endif
- change_proc_schedule_state(c_p,
- ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
- ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
- &state,
- &enq_prio,
- ERTS_PROC_LOCKS_ALL);
-
- ASSERT(!enqueue);
- return state;
+ ASSERT(enqueue || !enq);
+ if (enqueue)
+ *enqueue = enq;
+ if (prio)
+ *prio = enq_prio;
+ if (state)
+ *state = st;
}
-#ifdef ERTS_SMP
-
void
-erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
-{
- ErtsProcLocks xlocks;
- ASSERT(is_value(c_p->pending_exit.reason));
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
- ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
- & erts_smp_atomic32_read_nob(&c_p->state)));
-
- /* Ensure that all locks on c_p are locked before proceeding... */
- if (locks == ERTS_PROC_LOCKS_ALL)
- xlocks = 0;
- else {
- xlocks = ~locks & ERTS_PROC_LOCKS_ALL;
- if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) {
- erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
-
- set_proc_exiting(c_p,
- erts_smp_atomic32_read_acqb(&c_p->state),
- c_p->pending_exit.reason,
- c_p->pending_exit.bp);
- c_p->pending_exit.reason = THE_NON_VALUE;
- c_p->pending_exit.bp = NULL;
-
- if (xlocks)
- erts_smp_proc_unlock(c_p, xlocks);
-}
-
-static void save_pending_exiter(Process *p, ErtsProcList *plp);
-
-static void
-do_handle_pending_exiters(ErtsProcList *pnd_xtrs)
-{
- /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
- ErtsProcList *plp = pnd_xtrs;
-
- while (plp) {
- ErtsProcList *next_plp = plp->next;
- Process *p = erts_proc_lookup(plp->pid);
- if (p) {
- erts_aint32_t state;
- /*
- * If the process is running on a normal scheduler, the
- * pending exit will soon be detected and handled by the
- * scheduler running the process (at schedule in/out).
- */
- if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) {
- if (erts_proclist_same(plp, p)) {
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_EXITING))) {
- ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
- }
- }
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- }
- else {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- if (erts_proclist_same(plp, p)) {
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_EXITING))) {
- /*
- * Save process and try to acquire all
- * locks at a later time...
- */
- save_pending_exiter(p, plp);
- plp = NULL;
- }
- }
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- }
- }
- if (plp)
- proclist_destroy(plp);
- plp = next_plp;
- }
-}
-
-static void
-save_pending_exiter(Process *p, ErtsProcList *plp)
+erts_set_self_exiting(Process *c_p, Eterm reason)
{
- ErtsSchedulerSleepInfo *ssi;
- ErtsRunQueue *rq;
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
- rq = RUNQ_READ_RQ(&p->run_queue);
- ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-
- if (!plp)
- plp = proclist_create(p);
-
- erts_smp_runq_lock(rq);
-
- erts_proclist_store_last(&rq->procs.pending_exiters, plp);
-
- non_empty_runq(rq);
+ int enqueue;
+ erts_aint32_t enq_prio, state;
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
- ssi = rq->scheduler->ssi;
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- erts_smp_runq_unlock(rq);
+ set_self_exiting(c_p, reason, &enqueue, &enq_prio, &state);
+ c_p->freason = EXTAG_EXIT;
+ KILL_CATCHES(c_p);
+ c_p->i = (BeamInstr *) beam_exit;
- set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
-}
-
-#endif
+ /* Always active when exiting... */
+ ASSERT(state & ERTS_PSFLG_ACTIVE);
-/*
- * This function delivers an EXIT message to a process
- * which is trapping EXITs.
- */
-
-static ERTS_INLINE void
-send_exit_message(Process *to, ErtsProcLocks *to_locksp,
- Eterm exit_term, Uint term_size, Eterm token)
-{
- ErtsMessage *mp;
- ErlOffHeap *ohp;
- Eterm* hp;
- Eterm mess;
-#ifdef SHCOPY_SEND
- erts_shcopy_t info;
-#endif
+ /*
+ * If we are terminating a process that currently
+ * is executing on a dirty scheduler, it *should*
+ * be scheduled on a normal scheduler...
+ */
+ ASSERT(!(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ || enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
- if (!have_seqtrace(token)) {
-#ifdef SHCOPY_SEND
- INITIALIZE_SHCOPY(info);
- term_size = copy_shared_calculate(exit_term, &info);
- mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
- mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
- DESTROY_SHCOPY(info);
-#else
- mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
- mess = copy_struct(exit_term, term_size, &hp, ohp);
-#endif
- erts_queue_message(to, *to_locksp, mp, mess, am_system);
- } else {
- Eterm temp_token;
- Uint sz_token;
-
- ASSERT(is_tuple(token));
- sz_token = size_object(token);
-#ifdef SHCOPY_SEND
- INITIALIZE_SHCOPY(info);
- term_size = copy_shared_calculate(exit_term, &info);
- mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
- mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
- DESTROY_SHCOPY(info);
-#else
- mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
- mess = copy_struct(exit_term, term_size, &hp, ohp);
-#endif
- /* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
- temp_token = copy_struct(token, sz_token, &hp, ohp);
- ERL_MESSAGE_TOKEN(mp) = temp_token;
- erts_queue_message(to, *to_locksp, mp, mess, am_system);
- }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (enqueue)
+ add2runq(enqueue, enq_prio, c_p, state, NULL);
}
-/*
- *
- * *** Exit signal behavior ***
- *
- * Exit signals are asynchronous (truly asynchronous in the
- * SMP emulator). When the signal is received the receiver receives an
- * 'EXIT' message if it is trapping exits; otherwise, it will either
- * ignore the signal if the exit reason is normal, or go into an
- * exiting state (ERTS_PSFLG_EXITING). When a process has gone into the
- * exiting state it will not execute any more Erlang code, but it might
- * take a while before it actually exits. The exit signal is being
- * received when the 'EXIT' message is put in the message queue, the
- * signal is dropped, or when it changes state into exiting. The time it
- * is in the exiting state before actually exiting is undefined (it
- * might take a really long time under certain conditions). The
- * receiver of the exit signal does not break links or trigger monitors
- * until it actually exits.
- *
- * Exit signals and other signals, e.g. messages, have to be received
- * by a receiver in the same order as sent by a sender.
- *
- *
- *
- * Exit signal implementation in the SMP emulator:
- *
- * If the receiver is trapping exits, the signal is transformed
- * into an 'EXIT' message and sent as a normal message, if the
- * reason is normal the signal is dropped; otherwise, the process
- * is determined to be exited. The interesting case is when the
- * process is to be exited and this is what is described below.
- *
- * If it is possible, the receiver is set in the exiting state straight
- * away and we are done; otherwise, the sender places the exit reason
- * in the pending_exit field of the process struct and if necessary
- * adds the receiver to the run queue. It is typically not possible
- * to set a scheduled process or a process which we cannot get all locks
- * on without releasing locks on it in an exiting state straight away.
- *
- * The receiver will poll the pending_exit field when it reach certain
- * places during it's execution. When it discovers the pending exit
- * it will change state into the exiting state. If the receiver wasn't
- * scheduled when the pending exit was set, the first scheduler that
- * schedules a new process will set the receiving process in the exiting
- * state just before it schedules next process.
- *
- * When the exit signal is placed in the pending_exit field, the signal
- * is considered as being in transit on the Erlang level. The signal is
- * actually in some kind of semi transit state, since we have already
- * determined how it should be received. It will exit the process no
- * matter what if it is received (the process may exit by itself before
- * reception of the exit signal). The signal is received when it is
- * discovered in the pending_exit field by the receiver.
- *
- * The receiver have to poll the pending_exit field at least before:
- * - moving messages from the message in queue to the private message
- * queue. This in order to preserve signal order.
- * - unlink. Otherwise the process might get exited on a link that
- * have been removed.
- * - changing the trap_exit flag to true. This in order to simplify the
- * implementation; otherwise, we would have to transform the signal
- * into an 'EXIT' message when setting the trap_exit flag to true. We
- * would also have to maintain a queue of exit signals in transit.
- * - being scheduled in or out.
- */
-
-static ERTS_INLINE int
-send_exit_signal(Process *c_p, /* current process if and only
- if reason is stored on it */
- Eterm from, /* Id of sender of signal */
- Process *rp, /* receiving process */
- ErtsProcLocks *rp_locks,/* current locks on receiver */
- Eterm reason, /* exit reason */
- Eterm exit_tuple, /* Prebuild exit tuple
- or THE_NON_VALUE */
- Uint exit_tuple_sz, /* Size of prebuilt exit tuple
- (if exit_tuple != THE_NON_VALUE) */
- Eterm token, /* token */
- Process *token_update, /* token updater */
- Uint32 flags /* flags */
- )
-{
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- Eterm rsn = reason == am_kill ? am_killed : reason;
-
- ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
- ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
- == ERTS_PROC_LOCKS_XSIG_SEND);
-
- ASSERT(reason != THE_NON_VALUE);
-
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(process_exit_signal) && is_pid(from)) {
- DTRACE_CHARBUF(sender_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(receiver_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);
-
- dtrace_pid_str(from, sender_str);
- dtrace_proc_str(rp, receiver_str);
- erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
- DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
+void
+erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
+{
+ Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
+ Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsMonitorData *mdp = NULL;
+
+ if (erts_monitor_is_target(mon)) {
+ /* We are being watched... */
+ switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
+ case ERTS_MON_TYPE_PROC:
+ erts_proc_sig_send_monitor_down(mon, reason);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(mon->other.item));
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ prt = erts_id2port(mon->other.item);
+ if (prt) {
+ erts_fire_port_monitor(prt, mon);
+ erts_port_release(prt);
+ mon = NULL;
+ }
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ break;
+ }
+ case ERTS_MON_TYPE_RESOURCE:
+ erts_fire_nif_monitor(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsDSigData dsd;
+ int code;
+ Eterm watcher;
+ Eterm watched;
+
+ mdp = erts_monitor_to_data(mon);
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_m_exit(&dsd,
+ watcher,
+ watched,
+ mdp->ref,
+ reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ default:
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ mdp = NULL;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid target monitor type");
+ break;
+ }
}
-#endif
-
- if ((state & ERTS_PSFLG_TRAP_EXIT)
- && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
- /* have to release the status lock in order to send the exit message */
- erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND);
- *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
- if (have_seqtrace(token) && token_update)
- seq_trace_update_send(token_update);
- if (is_value(exit_tuple))
- send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
- else
- erts_deliver_exit_message(from, rp, rp_locks, rsn, token);
- return 1; /* Receiver will get a message */
- }
- else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
-#ifdef ERTS_SMP
- if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
- ASSERT(!rp->pending_exit.bp);
-
- if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
- /* Ensure that all locks on c_p are locked before
- proceeding... */
- if (*rp_locks != ERTS_PROC_LOCKS_ALL) {
- ErtsProcLocks need_locks = (~(*rp_locks)
- & ERTS_PROC_LOCKS_ALL);
- if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) {
- erts_smp_proc_unlock(c_p,
- *rp_locks & ~ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- *rp_locks = ERTS_PROC_LOCKS_ALL;
- }
- set_proc_exiting(c_p, state, rsn, NULL);
- }
- else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
- /* Process not running ... */
- ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
- if (need_locks
- && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- /* ... but we havn't got all locks on it ... */
- save_pending_exiter(rp, NULL);
- /*
- * The pending exit will be discovered when next
- * process is scheduled in
- */
- goto set_pending_exit;
- }
- /* ...and we have all locks on it... */
- *rp_locks = ERTS_PROC_LOCKS_ALL;
- set_proc_exiting(rp,
- state,
- (is_immed(rsn)
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
- }
- else { /* Process running... */
-
- /*
- * The pending exit will be discovered when the process
- * is scheduled out if not discovered earlier.
- */
-
- set_pending_exit:
- if (is_immed(rsn)) {
- rp->pending_exit.reason = rsn;
- }
- else {
- Eterm *hp;
- Uint sz = size_object(rsn);
- ErlHeapFragment *bp = new_message_buffer(sz);
-
- hp = &bp->mem[0];
- rp->pending_exit.reason = copy_struct(rsn,
- sz,
- &hp,
- &bp->off_heap);
- rp->pending_exit.bp = bp;
- }
- erts_smp_atomic32_read_bor_relb(&rp->state,
- ERTS_PSFLG_PENDING_EXIT);
- }
- }
- /* else:
- *
- * The receiver already has a pending exit (or is exiting)
- * so we drop this signal.
- *
- * NOTE: dropping this exit signal is based on the assumption
- * that the receiver *will* exit; either on the pending
- * exit or by itself before seeing the pending exit.
- */
-#else /* !ERTS_SMP */
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- if (!(state & ERTS_PSFLG_EXITING)) {
- set_proc_exiting(rp,
- state,
- (is_immed(rsn) || c_p == rp
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
- }
-#endif
- return -1; /* Receiver will exit */
+ else { /* Origin monitor */
+ /* We are watching someone else... */
+ switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
+ case ERTS_MON_TYPE_PROC:
+ erts_proc_sig_send_demonitor(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ erts_demonitor_time_offset(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_NODE:
+ mdp = erts_monitor_to_data(mon);
+ if (!erts_monitor_dist_delete(&mdp->target))
+ mdp = NULL;
+ break;
+ case ERTS_MON_TYPE_NODES:
+ erts_monitor_nodes_delete(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(mon->other.item));
+ prt = erts_port_lookup_raw(mon->other.item);
+ if (prt) {
+ if (erts_port_demonitor(c_p, prt, mon) != ERTS_PORT_OP_DROPPED)
+ mon = NULL;
+ }
+ break;
+ }
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsDSigData dsd;
+ int code;
+ Eterm watched;
+
+ mdp = erts_monitor_to_data(mon);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+ if (mon->flags & ERTS_ML_FLG_NAME) {
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ ASSERT(is_atom(watched));
+ dep = erts_sysname_to_connected_dist_entry(dist->nodename);
+ }
+ else {
+ watched = mon->other.item;
+ ASSERT(is_external_pid(watched));
+ dep = external_pid_dist_entry(watched);
+ }
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_demonitor(&dsd,
+ c_p->common.id,
+ watched,
+ mdp->ref,
+ 1);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ default:
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->target))
+ mdp = NULL;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid origin monitor type");
+ break;
+ }
}
- return 0; /* Receiver unaffected */
+ if (mdp)
+ erts_monitor_release_both(mdp);
+ else if (mon)
+ erts_monitor_release(mon);
}
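The ERTS_MON_TYPE_DIST_PROC arms above only send the remote signal when the connection id recorded in the monitor's dist structure still matches the one returned by erts_dsig_prepare(), i.e. a generation check that drops signals aimed at a connection that has since been torn down and re-established. A standalone model of that check (hypothetical names and values, not the real API):

#include <stdio.h>

typedef struct { unsigned connection_id; int up; } DistConn;

static int send_if_same_epoch(DistConn *conn, unsigned recorded_id,
                              const char *what)
{
    if (!conn->up)
        return 0;                      /* no connection: drop silently */
    if (conn->connection_id != recorded_id)
        return 0;                      /* reconnected since setup: drop */
    printf("sending %s on connection %u\n", what, conn->connection_id);
    return 1;
}

int main(void)
{
    DistConn conn = { .connection_id = 7, .up = 1 };
    send_if_same_epoch(&conn, 7, "monitor-down");  /* sent */
    conn.connection_id = 8;                        /* node reconnected */
    send_if_same_epoch(&conn, 7, "monitor-down");  /* dropped */
    return 0;
}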
-
-int
-erts_send_exit_signal(Process *c_p,
- Eterm from,
- Process *rp,
- ErtsProcLocks *rp_locks,
- Eterm reason,
- Eterm token,
- Process *token_update,
- Uint32 flags)
-{
- return send_exit_signal(c_p,
- from,
- rp,
- rp_locks,
- reason,
- THE_NON_VALUE,
- 0,
- token,
- token_update,
- flags);
-}
-
-typedef struct {
- Eterm reason;
- Process *p;
-} ExitMonitorContext;
-
-static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
-{
- ExitMonitorContext *pcontext = vpcontext;
- DistEntry *dep;
- ErtsMonitor *rmon;
-
- switch (mon->type) {
- case MON_ORIGIN:
- /* We are monitoring someone else, we need to demonitor that one.. */
- if (is_atom(mon->pid)) { /* remote by name */
- ASSERT(is_node_name_atom(mon->pid));
- dep = erts_sysname_to_connected_dist_entry(mon->pid);
- if (dep) {
- erts_smp_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL,
- ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
- mon->name,
- mon->ref,
- 1);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- erts_deref_dist_entry(dep);
- }
- } else {
- ASSERT(is_pid(mon->pid) || is_port(mon->pid));
- /* if is local by pid or name */
- if (is_internal_pid(mon->pid)) {
- Process *rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- goto done;
- }
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon == NULL) {
- goto done;
- }
- erts_destroy_monitor(rmon);
- } else if (is_internal_port(mon->pid)) {
- /* Is a local port */
- Port *prt = erts_port_lookup_raw(mon->pid);
- if (!prt) {
- goto done;
- }
- erts_port_demonitor(pcontext->p,
- ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
- prt, mon->ref, NULL);
- } else { /* remote by pid */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
- ASSERT(dep != NULL);
- if (dep) {
- erts_smp_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL,
- ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_demonitor(&dsd,
- rmon->pid,
- mon->pid,
- mon->ref,
- 1);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- }
- }
- }
- break;
- case MON_TARGET:
- ASSERT(mon->type == MON_TARGET);
- ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
- if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid);
- if (prt == NULL) {
- goto done;
- }
- erts_fire_port_monitor(prt, mon->ref);
- erts_port_release(prt);
- } else if (is_internal_pid(mon->pid)) {/* local by name or pid */
- Eterm watched;
- Process *rp;
- DeclareTmpHeapNoproc(lhp,3);
- ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCKS_MSG_SEND);
- rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
- if (rp == NULL) {
- goto done;
- }
- UseTmpHeapNoproc(3);
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- if (rmon) {
- erts_destroy_monitor(rmon);
- watched = (is_atom(mon->name)
- ? TUPLE2(lhp, mon->name,
- erts_this_dist_entry->sysname)
- : pcontext->p->common.id);
- erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
- watched, pcontext->reason);
- }
- UnUseTmpHeapNoproc(3);
- /* else: demonitor while we exited, i.e. do nothing... */
- erts_smp_proc_unlock(rp, rp_locks);
- } else { /* external by pid or name */
- ASSERT(is_external_pid(mon->pid));
- dep = external_pid_dist_entry(mon->pid);
- ASSERT(dep != NULL);
- if (dep) {
- erts_smp_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_smp_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL,
- ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_m_exit(&dsd,
- mon->pid,
- (rmon->name != NIL
- ? rmon->name
- : rmon->pid),
- mon->ref,
- pcontext->reason);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- }
- }
- break;
- case MON_TIME_OFFSET:
- erts_demonitor_time_offset(mon->ref);
- break;
- default:
- ERTS_INTERNAL_ERROR("Invalid monitor type");
+void
+erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
+{
+ Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
+ Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsLinkData *ldp = NULL;
+
+ switch (lnk->type) {
+ case ERTS_LNK_TYPE_PROC:
+ ASSERT(is_internal_pid(lnk->other.item));
+ erts_proc_sig_send_link_exit(c_p, c_p->common.id, lnk,
+ reason, SEQ_TRACE_TOKEN(c_p));
+ lnk = NULL;
+ break;
+ case ERTS_LNK_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(lnk->other.item));
+ prt = erts_port_lookup(lnk->other.item,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt)
+ erts_port_exit(NULL,
+ (ERTS_PORT_SIG_FLG_FORCE_SCHED
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK),
+ prt,
+ c_p->common.id,
+ reason,
+ NULL);
+ break;
+ }
+ case ERTS_LNK_TYPE_DIST_PROC: {
+ DistEntry *dep;
+ ErtsMonLnkDist *dist;
+ ErtsLink *dlnk;
+ ErtsDSigData dsd;
+ int code;
+
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
+
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
+
+ ASSERT(dep != erts_this_dist_entry);
+
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
+
+ code = erts_dsig_prepare(&dsd, dep, c_p, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_exit_tt(&dsd,
+ c_p->common.id,
+ lnk->other.item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ }
+ break;
}
- done:
- /* As the monitors are previously removed from the process,
- distribution operations will not cause monitors to disappear,
- we can safely delete it. */
-
- erts_destroy_monitor(mon);
-}
-
-typedef struct {
- Process *p;
- Eterm reason;
- Eterm exit_tuple;
- Uint exit_tuple_sz;
-} ExitLinkContext;
-
-static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
-{
- ExitLinkContext *pcontext = vpcontext;
- /* Unpack context, it's readonly */
- Process *p = pcontext->p;
- Eterm reason = pcontext->reason;
- Eterm exit_tuple = pcontext->exit_tuple;
- Uint exit_tuple_sz = pcontext->exit_tuple_sz;
- Eterm item = lnk->pid;
- ErtsLink *rlnk;
- DistEntry *dep;
- Process *rp;
-
-
- switch(lnk->type) {
- case LINK_PID:
- if(is_internal_port(item)) {
- Port *prt = erts_port_lookup(item, ERTS_PORT_SFLGS_INVALID_LOOKUP);
- if (prt)
- erts_port_exit(NULL,
- (ERTS_PORT_SIG_FLG_FORCE_SCHED
- | ERTS_PORT_SIG_FLG_BROKEN_LINK),
- prt,
- p->common.id,
- reason,
- NULL);
- }
- else if(is_external_port(item)) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Erroneous link between %T and external port %T "
- "found\n",
- p->common.id,
- item);
- erts_send_error_to_logger_nogl(dsbufp);
- ASSERT(0); /* It isn't possible to setup such a link... */
- }
- else if (is_internal_pid(item)) {
- ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCKS_XSIG_SEND);
- rp = erts_pid2proc(NULL, 0, item, rp_locks);
- if (rp) {
- rlnk = erts_remove_link(&ERTS_P_LINKS(rp), p->common.id);
- /* If rlnk == NULL, we got unlinked while exiting,
- i.e., do nothing... */
- if (rlnk) {
- int xres;
- erts_destroy_link(rlnk);
- xres = send_exit_signal(NULL,
- p->common.id,
- rp,
- &rp_locks,
- reason,
- exit_tuple,
- exit_tuple_sz,
- SEQ_TRACE_TOKEN(p),
- p,
- ERTS_XSIG_FLG_IGN_KILL);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- /* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
- rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
- }
- trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id);
- }
- }
- }
- ASSERT(rp != p);
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
- else if (is_external_pid(item)) {
- dep = external_pid_dist_entry(item);
- if(dep != erts_this_dist_entry) {
- ErtsDSigData dsd;
- int code;
- ErtsDistLinkData dld;
- erts_remove_dist_link(&dld, p->common.id, item, dep);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_exit_tt(&dsd, p->common.id, item,
- reason, SEQ_TRACE_TOKEN(p));
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_destroy_dist_link(&dld);
- }
- }
- break;
- case LINK_NODE:
- ASSERT(is_node_name_atom(item));
- dep = erts_sysname_to_connected_dist_entry(item);
- if(dep) {
- /* dist entries have node links in a separate structure to
- avoid confusion */
- erts_smp_de_links_lock(dep);
- rlnk = erts_remove_link(&(dep->node_links), p->common.id);
- erts_smp_de_links_unlock(dep);
- if (rlnk)
- erts_destroy_link(rlnk);
- erts_deref_dist_entry(dep);
- }
- break;
-
default:
- erts_exit(ERTS_ERROR_EXIT, "bad type in link list\n");
- break;
+ ERTS_INTERNAL_ERROR("Unexpected link type");
+ break;
}
- erts_destroy_link(lnk);
-}
-static void
-resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
-{
- Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
- smon->pid, ERTS_PROC_LOCK_STATUS);
- if (suspendee) {
- ASSERT(suspendee != vc_p);
- if (smon->active)
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- }
- erts_destroy_suspend_monitor(smon);
+ if (ldp)
+ erts_link_release_both(ldp);
+ else if (lnk)
+ erts_link_release(lnk);
}
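Both exit handlers end with the same ownership convention: erts_monitor_release()/erts_link_release() drops one half of a shared data block, while the *_release_both() variants drop the whole pair at once. A standalone sketch of a two-half link object, under the assumption (not stated in this diff) that the halves share a simple reference count of two:

#include <stdio.h>
#include <stdlib.h>

typedef struct LinkData LinkData;
typedef struct { LinkData *data; int owner_id; } Link;
struct LinkData { int refc; Link a; Link b; };

static LinkData *link_create(int a_owner, int b_owner)
{
    LinkData *ldp = malloc(sizeof *ldp);
    ldp->refc = 2;                 /* one reference per half */
    ldp->a.data = ldp; ldp->a.owner_id = a_owner;
    ldp->b.data = ldp; ldp->b.owner_id = b_owner;
    return ldp;
}

static void link_release(Link *lnk)           /* release one half */
{
    if (--lnk->data->refc == 0)
        free(lnk->data);
}

static void link_release_both(LinkData *ldp)  /* release both halves */
{
    ldp->refc -= 2;
    if (ldp->refc == 0)
        free(ldp);
}

int main(void)
{
    LinkData *ldp = link_create(1, 2);
    link_release(&ldp->a);      /* e.g. one side already torn down */
    link_release(&ldp->b);      /* last reference frees the pair */
    (void) link_release_both;   /* alternative teardown path */
    return 0;
}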
/* this function finishes a process and propagates exit messages - called
@@ -12798,8 +12265,6 @@ void
erts_do_exit_process(Process* p, Eterm reason)
{
p->arity = 0; /* No live registers */
- p->fvalue = reason;
-
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_exit)) {
@@ -12816,31 +12281,14 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n",
p->common.id, reason);
-#ifdef ERTS_SMP
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when
looking up a process (erts_pid2proc()) to prevent the looked up
process from exiting until the lock has been released. */
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
-
-#ifndef ERTS_SMP
- set_proc_self_exiting(p);
-#else
- if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
- /* Process exited before pending exit was received... */
- p->pending_exit.reason = THE_NON_VALUE;
- if (p->pending_exit.bp) {
- free_message_buffer(p->pending_exit.bp);
- p->pending_exit.bp = NULL;
- }
- }
-
- cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
-#endif
+ set_self_exiting(p, reason, NULL, NULL, NULL);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
@@ -12859,7 +12307,7 @@ erts_do_exit_process(Process* p, Eterm reason)
ASSERT(erts_proc_read_refc(p) > 0);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
if (IS_TRACED_FL(p,F_TRACE_PROCS))
trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
@@ -12877,25 +12325,27 @@ erts_do_exit_process(Process* p, Eterm reason)
void
erts_continue_exit_process(Process *p)
{
- ErtsLink* lnk;
- ErtsMonitor *mon;
+ ErtsLink *links;
+ ErtsMonitor *monitors;
+ ErtsMonitor *lt_monitors;
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
Eterm reason = p->fvalue;
- DistEntry *dep;
+ DistEntry *dep = NULL;
erts_aint32_t state;
int delay_del_proc = 0;
+ ErtsProcExitContext pectxt;
#ifdef DEBUG
int yield_allowed = 1;
#endif
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
ASSERT(ERTS_PROC_IS_EXITING(p));
ASSERT(erts_proc_read_refc(p) > 0);
if (p->bif_timers) {
- if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) {
+ if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
ASSERT(erts_proc_read_refc(p) > 0);
goto yield;
}
@@ -12903,20 +12353,6 @@ erts_continue_exit_process(Process *p)
p->bif_timers = NULL;
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (p->accessor_bif_timers) {
- if (erts_detach_accessor_bif_timers(p,
- p->accessor_bif_timers,
- &p->u.terminate)) {
- ASSERT(erts_proc_read_refc(p) > 0);
- goto yield;
- }
- ASSERT(erts_proc_read_refc(p) > 0);
- p->accessor_bif_timers = NULL;
- }
-#endif
-
-#ifdef ERTS_SMP
if (p->flags & F_SCHDLR_ONLN_WAITQ)
abort_sched_onln_chng_waitq(p);
@@ -12960,7 +12396,6 @@ erts_continue_exit_process(Process *p)
__FILE__, __LINE__, (int) ssr);
}
}
-#endif
if (p->flags & F_USING_DB) {
if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
@@ -12969,30 +12404,24 @@ erts_continue_exit_process(Process *p)
}
erts_set_gc_state(p, 1);
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_ACTIVE_SYS) {
+ state = erts_atomic32_read_acqb(&p->state);
+ if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
goto yield;
}
+#ifdef DEBUG
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
+ ASSERT(p->dirty_sys_tasks == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+#endif
+
if (p->flags & F_USING_DDLL) {
erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
p->flags &= ~F_USING_DDLL;
}
- if (p->nodes_monitors) {
- erts_delete_nodes_monitors(p, ERTS_PROC_LOCK_MAIN);
- p->nodes_monitors = NULL;
- }
-
-
- if (p->suspend_monitors) {
- erts_sweep_suspend_monitors(p->suspend_monitors,
- resume_suspend_monitor,
- p);
- p->suspend_monitors = NULL;
- }
-
/*
* The registered name *should* be the last "erlang resource" to
* clean up.
@@ -13005,7 +12434,7 @@ erts_continue_exit_process(Process *p)
if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
trace_sched(p, curr_locks, am_out_exited);
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
curr_locks = ERTS_PROC_LOCKS_ALL;
/*
@@ -13020,31 +12449,28 @@ erts_continue_exit_process(Process *p)
* Note! The monitor and link fields will be overwritten
* by erts_ptab_delete_element() below.
*/
- mon = ERTS_P_MONITORS(p);
- lnk = ERTS_P_LINKS(p);
+ links = ERTS_P_LINKS(p);
+ monitors = ERTS_P_MONITORS(p);
+ lt_monitors = ERTS_P_LT_MONITORS(p);
{
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
rq = erts_get_runq_current(erts_proc_sched_data(p));
- erts_smp_runq_lock(rq);
+ erts_runq_lock(rq);
-#ifdef ERTS_SMP
ASSERT(p->scheduler_data);
ASSERT(p->scheduler_data->current_process == p);
ASSERT(p->scheduler_data->free_process == NULL);
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
-#else
- erts_proc_inc_refc(p); /* Decremented in schedule() */
-#endif
/* Time of death! */
erts_ptab_delete_element(&erts_proc, &p->common);
- erts_smp_runq_unlock(rq);
+ erts_runq_unlock(rq);
}
/*
@@ -13056,90 +12482,122 @@ erts_continue_exit_process(Process *p)
{
/* Inactivate and notify free */
- erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
int refc_inced = 0;
while (1) {
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
- n &= ~ERTS_PSFLG_ACTIVE;
+ n &= ~(ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
erts_proc_inc_refc(p);
refc_inced = 1;
}
- a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+ * The dirty scheduler decreases refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
-#endif
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
erts_proc_dec_refc(p);
}
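The "Inactivate and notify free" block above is a standard compare-and-swap retry loop: read the state word, compute the new value (set FREE, clear the ACTIVE flags), and retry until no concurrent writer intervened. A minimal C11 model with hypothetical flag values:

#include <stdatomic.h>
#include <stdio.h>

#define PSFLG_ACTIVE   (1u << 0)
#define PSFLG_EXITING  (1u << 1)
#define PSFLG_FREE     (1u << 2)

int main(void)
{
    _Atomic unsigned state = PSFLG_ACTIVE | PSFLG_EXITING;
    unsigned e = atomic_load_explicit(&state, memory_order_relaxed);
    unsigned n;
    do {                       /* retry until no concurrent writer races us */
        n = (e | PSFLG_FREE) & ~PSFLG_ACTIVE;
    } while (!atomic_compare_exchange_weak(&state, &e, n));
    printf("state: 0x%x\n", atomic_load(&state));
    return 0;
}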
-
- dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ dep = ((p->flags & F_DISTRIBUTION)
+ ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
+ : NULL);
- if (dep) {
- erts_do_net_exits(dep, reason);
- }
/*
- * Pre-build the EXIT tuple if there are any links.
+ * Signal prio elevation tasks might show up until we
+ * have entered the free state. Clean up such tasks now.
*/
- if (lnk) {
- DeclareTmpHeap(tmp_heap,4,p);
- Eterm exit_tuple;
- Uint exit_tuple_sz;
- Eterm* hp;
+ state = erts_atomic32_read_acqb(&p->state);
+ if (!(state & ERTS_PSFLG_SYS_TASKS))
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ else {
+ erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ do {
+ (void) cleanup_sys_tasks(p, state, CONTEXT_REDS);
+ state = erts_atomic32_read_acqb(&p->state);
+ } while (state & ERTS_PSFLG_SYS_TASKS);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ }
+
+#ifdef DEBUG
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->sys_task_qs == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+#endif
- UseTmpHeap(4,p);
- hp = &tmp_heap[0];
+ if (dep) {
+ erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason);
+ erts_deref_dist_entry(dep);
+ }
- exit_tuple = TUPLE3(hp, am_EXIT, p->common.id, reason);
+ pectxt.c_p = p;
+ pectxt.reason = reason;
- exit_tuple_sz = size_object(exit_tuple);
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
- {
- ExitLinkContext context = {p, reason, exit_tuple, exit_tuple_sz};
- erts_sweep_links(lnk, &doit_exit_link, &context);
- }
- UnUseTmpHeap(4,p);
+ erts_proc_sig_fetch(p);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
+ if (links) {
+ erts_link_tree_foreach_delete(&links,
+ erts_proc_exit_handle_link,
+ (void *) &pectxt);
+ ASSERT(!links);
}
- {
- ExitMonitorContext context = {reason, p};
- erts_sweep_monitors(mon,&doit_exit_monitor,&context); /* Allocates TmpHeap, but we
- have none here */
+ if (monitors) {
+ erts_monitor_tree_foreach_delete(&monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &pectxt);
+ ASSERT(!monitors);
}
-#ifdef ERTS_SMP
- erts_flush_trace_messages(p, 0);
-#endif
+ if (lt_monitors) {
+ erts_monitor_list_foreach_delete(&lt_monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &pectxt);
+ ASSERT(!lt_monitors);
+ }
+
+ /*
+ * erts_proc_sig_handle_exit() implements yielding.
+ * However, this function cannot handle it yet... loop
+ * until done...
+ */
+ while (!0) {
+ int reds = CONTEXT_REDS;
+ if (erts_proc_sig_handle_exit(p, &reds))
+ break;
+ }
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
if (!delay_del_proc)
delete_process(p);
-#ifdef ERTS_SMP
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-#endif
-
return;
yield:
@@ -13148,30 +12606,84 @@ erts_continue_exit_process(Process *p)
ASSERT(yield_allowed);
#endif
- ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
+ ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
p->i = (BeamInstr *) beam_continue_exit;
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
curr_locks |= ERTS_PROC_LOCK_STATUS;
}
if (curr_locks != ERTS_PROC_LOCK_MAIN)
- erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
+ erts_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
BUMP_ALL_REDS(p);
}
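As the comment before the erts_proc_sig_handle_exit() loop notes, erts_continue_exit_process() cannot yet yield across that call, so it hands the handler a fresh reduction budget each round and loops until the handler reports completion. A standalone model of that budget-and-retry contract (hypothetical work counts):

#include <stdio.h>

#define CONTEXT_REDS 4000

/* Returns nonzero when all work is done; consumes from *redsp. */
static int handle_work(int *remaining, int *redsp)
{
    while (*remaining > 0 && *redsp > 0) {
        (*remaining)--;          /* do one unit of work */
        (*redsp)--;
    }
    return *remaining == 0;
}

int main(void)
{
    int remaining = 10000;
    int rounds = 0;
    for (;;) {                   /* caller cannot yield: loop until done */
        int reds = CONTEXT_REDS;
        rounds++;
        if (handle_work(&remaining, &reds))
            break;
    }
    printf("finished after %d rounds\n", rounds);
    return 0;
}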
+Process *
+erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
+ erts_aint32_t *statep)
+{
+ Process *rp = erts_proc_lookup_raw(pid);
+ erts_aint32_t fail_state = ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q;
+ erts_aint32_t state;
+ ErtsProcLocks tmp_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
+
+ tmp_locks |= locks;
+ if (statep)
+ fail_state |= *statep;
+
+ if (!rp) {
+ if (statep)
+ *statep = ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE;
+ return NULL;
+ }
+
+ ERTS_LC_ASSERT(!erts_proc_lc_my_proc_locks(rp));
+
+ state = erts_atomic32_read_nob(&rp->state);
+ if (statep)
+ *statep = state;
+
+ if (state & ERTS_PSFLG_FREE)
+ return NULL;
+
+ if (state & fail_state)
+ return ERTS_PROC_LOCK_BUSY;
+
+ if (erts_proc_trylock(rp, tmp_locks) == EBUSY)
+ return ERTS_PROC_LOCK_BUSY;
+
+ state = erts_atomic32_read_nob(&rp->state);
+ if (statep)
+ *statep = state;
+
+ if ((state & fail_state)
+ || rp->sig_inq.first
+ || rp->sig_qs.cont) {
+ erts_proc_unlock(rp, tmp_locks);
+ if (state & ERTS_PSFLG_FREE)
+ return NULL;
+ else
+ return ERTS_PROC_LOCK_BUSY;
+ }
+
+ if (tmp_locks != locks)
+ erts_proc_unlock(rp, tmp_locks & ~locks);
+
+ return rp;
+}
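erts_try_lock_sig_free_proc() has a three-way result: NULL when the target is gone, the ERTS_PROC_LOCK_BUSY sentinel when signals are pending or the trylock fails, and the locked process otherwise. A standalone model of dispatching on such a sentinel-based convention (hypothetical types; in the real code the sentinel is a distinguished pointer value):

#include <stdio.h>

typedef struct { int id; } Proc;

static Proc busy_sentinel;              /* stands in for ERTS_PROC_LOCK_BUSY */
#define PROC_LOCK_BUSY (&busy_sentinel)

static Proc *try_lock(int exists, int busy)
{
    static Proc p = { 42 };
    if (!exists) return NULL;           /* process is FREE/exiting */
    if (busy)    return PROC_LOCK_BUSY; /* signals pending; caller must retry */
    return &p;                          /* locked, signal-free process */
}

int main(void)
{
    Proc *rp = try_lock(1, 0);
    if (rp == NULL)
        puts("target no longer exists");
    else if (rp == PROC_LOCK_BUSY)
        puts("retry via the signal queue path");
    else
        printf("locked signal-free process %d\n", rp->id);
    return 0;
}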
+
/*
* Stack dump functions follow.
*/
void
-erts_stack_dump(int to, void *to_arg, Process *p)
+erts_stack_dump(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
int yreg = -1;
@@ -13186,7 +12698,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
}
void
-erts_program_counter_info(int to, void *to_arg, Process *p)
+erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
{
erts_aint32_t state;
int i;
@@ -13197,7 +12709,7 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "CP: %p (", p->cp);
print_function_from_pc(to, to_arg, p->cp);
erts_print(to, to_arg, ")\n");
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_atomic32_read_acqb(&p->state);
if (!(state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_GC))) {
@@ -13216,10 +12728,10 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
}
static void
-print_function_from_pc(int to, void *to_arg, BeamInstr* x)
+print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA *cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -13233,12 +12745,13 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%d + %d",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
static int
-stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
+stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -13266,19 +12779,35 @@ stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
return yreg;
}
+static void print_current_process_info(fmtfn_t, void *to_arg, ErtsSchedulerData*);
+
/*
* Print scheduler information
*/
void
-erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
+erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp)
+{
int i;
erts_aint32_t flg;
- Process *p;
- erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ erts_print(to, to_arg, "=dirty_cpu_scheduler:%u\n",
+ (esdp->dirty_no + erts_no_schedulers));
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ erts_print(to, to_arg, "=dirty_io_scheduler:%u\n",
+ (esdp->dirty_no + erts_no_schedulers + erts_no_dirty_cpu_schedulers));
+ break;
+ default:
+ erts_print(to, to_arg, "=unknown_scheduler_type:%u\n", esdp->type);
+ break;
+ }
-#ifdef ERTS_SMP
- flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
+ flg = erts_atomic32_read_dirty(&esdp->ssi->flags);
erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
erts_aint32_t chk = (1 << i);
@@ -13294,6 +12823,8 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "WAITING"); break;
case ERTS_SSI_FLG_SUSPENDED:
erts_print(to, to_arg, "SUSPENDED"); break;
+ case ERTS_SSI_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13303,7 +12834,6 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
}
}
erts_print(to, to_arg, "\n");
-#endif
flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
@@ -13321,10 +12851,24 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
}
erts_print(to, to_arg, "\n");
- erts_print(to, to_arg, "Current Port: ");
- if (esdp->current_port)
- erts_print(to, to_arg, "%T", esdp->current_port->common.id);
- erts_print(to, to_arg, "\n");
+ if (esdp->type == ERTS_SCHED_NORMAL) {
+ erts_print(to, to_arg, "Current Port: ");
+ if (esdp->current_port)
+ erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+ erts_print(to, to_arg, "\n");
+
+ erts_print_run_queue_info(to, to_arg, esdp->run_queue);
+ }
+
+ /* This *MUST* be the last information in the scheduler block */
+ print_current_process_info(to, to_arg, esdp);
+}
+
+void erts_print_run_queue_info(fmtfn_t to, void *to_arg,
+ ErtsRunQueue *run_queue)
+{
+ erts_aint32_t flg;
+ int i;
for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
erts_print(to, to_arg, "Run Queue ");
@@ -13346,12 +12890,12 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
break;
}
erts_print(to, to_arg, "Length: %d\n",
- erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+ erts_atomic32_read_dirty(&run_queue->procs.prio_info[i].len));
}
erts_print(to, to_arg, "Run Queue Port Length: %d\n",
- erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+ erts_atomic32_read_dirty(&run_queue->ports.info.len));
- flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags);
+ flg = erts_atomic32_read_dirty(&run_queue->flags);
erts_print(to, to_arg, "Run Queue Flags: ");
for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
erts_aint32_t chk = (1 << i);
@@ -13403,6 +12947,12 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "NONEMPTY"); break;
case ERTS_RUNQ_FLG_PROTECTED:
erts_print(to, to_arg, "PROTECTED"); break;
+ case ERTS_RUNQ_FLG_EXEC:
+ erts_print(to, to_arg, "EXEC"); break;
+ case ERTS_RUNQ_FLG_MSB_EXEC:
+ erts_print(to, to_arg, "MSB_EXEC"); break;
+ case ERTS_RUNQ_FLG_MISC_OP:
+ erts_print(to, to_arg, "MISC_OP"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
}
@@ -13412,12 +12962,18 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
}
}
erts_print(to, to_arg, "\n");
+}
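The scheduler and run-queue dumps above decode their flag words with the same loop: test one bit per iteration, print its name or UNKNOWN, and clear it so the loop can stop as soon as the word is empty. A standalone sketch with hypothetical flag names:

#include <stdio.h>

static const char *flag_name(unsigned bit)
{
    switch (bit) {
    case 1u << 0: return "OUT_OF_WORK";
    case 1u << 1: return "SUSPENDED";
    case 1u << 2: return "EXEC";
    default:      return NULL;
    }
}

static void print_flags(unsigned flg)
{
    int first = 1;
    for (unsigned i = 0; i < 32 && flg; i++) {
        unsigned chk = 1u << i;
        if (flg & chk) {
            const char *nm = flag_name(chk);
            if (!first)
                printf(" | ");
            if (nm)
                printf("%s", nm);
            else
                printf("UNKNOWN(%u)", chk);
            first = 0;
            flg &= ~chk;      /* loop ends as soon as no bits remain */
        }
    }
    printf("\n");
}

int main(void)
{
    print_flags((1u << 0) | (1u << 2) | (1u << 5));
    return 0;
}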
+
+
+static void print_current_process_info(fmtfn_t to, void *to_arg,
+ ErtsSchedulerData* esdp)
+{
+ Process *p = esdp->current_process;
+ erts_aint32_t flg;
- /* This *MUST* to be the last information in scheduler block */
- p = esdp->current_process;
erts_print(to, to_arg, "Current Process: ");
if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
- flg = erts_smp_atomic32_read_dirty(&p->state);
+ flg = erts_atomic32_read_dirty(&p->state);
erts_print(to, to_arg, "%T\n", p->common.id);
erts_print(to, to_arg, "Current Process State: ");
@@ -13452,11 +13008,11 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
* A nice system halt closing all open ports goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
* on all schedulers, then schedules itself out.
- * 2) All shedulers detect this and set the flag halt_in_progress
+ * 2) All schedulers detect this and set the flag ERTS_RUNQ_FLG_HALTING
* on their run queue. The last scheduler sets all non-closed ports
* ERTS_PORT_SFLG_HALT. Global atomic erts_halt_progress is used
* as refcount to determine which is last.
- * 3) While the run ques has flag halt_in_progress no processes
+     *    3) While the run queues have the flag ERTS_RUNQ_FLG_HALTING no processes
* will be scheduled, only ports.
 *    4) When the last port closes, that scheduler calls erlang:halt/1.
 *    The same global atomic is used as the refcount.
@@ -13467,25 +13023,26 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
*/
void erts_halt(int code)
{
- if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
+ if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1;
- ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1;
-#endif
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING);
+ ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING);
erts_halt_code = code;
notify_reap_ports_relb();
}
}
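The arming-and-countdown trick above lets one atomic word serve both as a "halt requested" flag and as a refcount of schedulers that still have ports to reap. A minimal sketch of the same pattern using C11 atomics (illustrative names only, not ERTS code):

    #include <stdatomic.h>

    static atomic_int halt_progress = -1;      /* -1: no halt requested */

    void request_halt(int no_schedulers)
    {
        int expected = -1;
        /* Only the first caller arms the countdown; later calls see
         * the cmpxchg fail and do nothing. */
        if (atomic_compare_exchange_strong(&halt_progress, &expected,
                                           no_schedulers)) {
            /* set HALTING flags on run queues, notify schedulers, ... */
        }
    }

    void scheduler_saw_halt(void)
    {
        /* Each scheduler decrements exactly once; the one that takes
         * the counter from 1 to 0 is last and performs the halt. */
        if (atomic_fetch_sub(&halt_progress, 1) == 1) {
            /* actually halt */
        }
    }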
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int
erts_dbg_check_halloc_lock(Process *p)
{
ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
+ if ((p->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ && ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()))
+ return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
esdp = erts_proc_sched_data(p);
@@ -13496,3 +13053,24 @@ erts_dbg_check_halloc_lock(Process *p)
return 0;
}
#endif
+
+void
+erts_debug_later_op_foreach(void (*callback)(void*),
+ void (*func)(void *, ErtsThrPrgrVal, void *),
+ void *arg)
+{
+ int six;
+ if (!erts_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd;
+ ErtsThrPrgrLaterOp *lop = esdp->aux_work_data.later_op.first;
+
+ while (lop) {
+ if (lop->func == callback)
+ func(arg, lop->later, lop->data);
+ lop = lop->next;
+ }
+ }
+}
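erts_debug_later_op_foreach is a crash-dump/debug helper: with thread progress blocked, it walks every scheduler's queue of scheduled thread-progress later operations and hands each one whose callback matches to func. A hypothetical caller counting pending operations for one callback (my_later_cb is a made-up name of the right type):

    static void count_pending(void *arg, ErtsThrPrgrVal later, void *data)
    {
        (void) later; (void) data;
        ++*(int *) arg;                      /* one more matching op */
    }

    /* ... with thread progress blocked ... */
    int pending = 0;
    erts_debug_later_op_foreach(my_later_cb, count_pending, &pending);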
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 3347a7a60e..a1b029adbe 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,8 @@
#ifndef __PROCESS_H__
#define __PROCESS_H__
+#include "sys.h"
+
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#if (defined(ERL_PROCESS_C__) \
|| defined(ERL_PORT_TASK_C__) \
@@ -37,8 +39,6 @@
typedef struct process Process;
-#include "sys.h"
-
#define ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
@@ -47,12 +47,11 @@ typedef struct process Process;
#include "erl_port.h"
#undef ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_vm.h"
-#include "erl_smp.h"
#include "erl_message.h"
#include "erl_process_dict.h"
#include "erl_node_container_utils.h"
#include "erl_node_tables.h"
-#include "erl_monitors.h"
+#include "erl_monitor_link.h"
#include "erl_hl_timer.h"
#include "erl_time.h"
#include "erl_atom_table.h"
@@ -63,6 +62,9 @@ typedef struct process Process;
#define ERTS_ONLY_INCLUDE_TRACE_FLAGS
#include "erl_trace.h"
#undef ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#define ERTS_ONLY_SCHED_SPEC_ETS_DATA
+#include "erl_db.h"
+#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA
#ifdef HIPE
#include "hipe_process.h"
@@ -73,8 +75,6 @@ typedef struct process Process;
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
-struct ErtsNodesMonitor_;
-
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 0
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT 0
@@ -93,10 +93,6 @@ struct ErtsNodesMonitor_;
#define ERTS_HEAP_FREE(Type, Ptr, Size) \
erts_free((Type), (Ptr))
-#define INITIAL_MOD 0
-#define INITIAL_FUN 1
-#define INITIAL_ARI 2
-
#include "export.h"
struct saved_calls {
@@ -107,22 +103,20 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
-extern int erts_eager_check_io;
-extern int erts_sched_compact_load;
-extern int erts_sched_balance_util;
-extern Uint erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern Uint erts_no_dirty_cpu_schedulers;
-extern Uint erts_no_dirty_io_schedulers;
-#endif
-extern Uint erts_no_run_queues;
+extern int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
+extern int ERTS_WRITE_UNLIKELY(erts_sched_balance_util);
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_total_schedulers);
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers);
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers);
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
extern int erts_sched_thread_suggested_stack_size;
-#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
+extern int erts_dcpu_sched_thread_suggested_stack_size;
+extern int erts_dio_sched_thread_suggested_stack_size;
+#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
-#ifdef ERTS_SMP
#include "erl_bits.h"
-#endif
/* process priorities */
#define PRIORITY_MAX 0
@@ -173,8 +167,16 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
#define ERTS_RUNQ_FLG_EXEC \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 7))
+#define ERTS_RUNQ_FLG_MSB_EXEC \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 8))
+#define ERTS_RUNQ_FLG_MISC_OP \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
+#define ERTS_RUNQ_FLG_HALTING \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
+#define ERTS_RUNQ_FLG_CHECKIO \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 11))
-#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 8)
+#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 12)
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -214,34 +216,37 @@ extern int erts_sched_thread_suggested_stack_size;
((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
#define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \
- erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT))
+ erts_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT))
#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bor_relb(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bor_nob(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bor_nob(&(RQ)->flags, \
(erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \
(erts_aint32_t) (MSK), \
(erts_aint32_t) (FLGS)))
#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_band_relb(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
#define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \
- ((Uint32) erts_smp_atomic32_read_band_nob(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_band_nob(&(RQ)->flags, \
(erts_aint32_t) ~(FLGS)))
#define ERTS_RUNQ_FLGS_GET(RQ) \
- ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_acqb(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_NOB(RQ) \
- ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_nob(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_MB(RQ) \
- ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags))
+ ((Uint32) erts_atomic32_read_mb(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \
- ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \
(erts_aint32_t) (MSK), \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_POINTER_MASK (~((erts_aint_t) 3))
+#define ERTS_RUNQ_BOUND_FLAG ((erts_aint_t) 1)
+
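ERTS_RUNQ_POINTER_MASK and ERTS_RUNQ_BOUND_FLAG implement a tagged pointer: run-queue structures are word aligned, so the low bits of the pointer stored in p->run_queue are always zero and bit 0 is free to carry the "bound to this queue" flag. A plain-C sketch of the encoding (illustrative, not ERTS code):

    #include <stdint.h>

    /* Pack an aligned pointer and a boolean into one machine word. */
    static inline uintptr_t runq_pack(void *rq, int bound)
    {
        return (uintptr_t) rq | (bound ? (uintptr_t) 1 : 0);
    }

    static inline void *runq_ptr(uintptr_t v)
    {
        return (void *) (v & ~(uintptr_t) 3);    /* strip tag bits */
    }

    static inline int runq_bound(uintptr_t v)
    {
        return (int) (v & 1);                    /* low bit is the tag */
    }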
typedef enum {
ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED,
@@ -265,8 +270,9 @@ typedef enum {
#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_FLG_MSB_EXEC (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_FLGS_MAX 5
+#define ERTS_SSI_FLGS_MAX 6
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -277,7 +283,8 @@ typedef enum {
#define ERTS_SSI_FLGS_ALL \
(ERTS_SSI_FLGS_SLEEP \
| ERTS_SSI_FLG_WAITING \
- | ERTS_SSI_FLG_SUSPENDED)
+ | ERTS_SSI_FLG_SUSPENDED \
+ | ERTS_SSI_FLG_MSB_EXEC)
/*
* Keep ERTS_SSI_AUX_WORK flags ordered in expected frequency
@@ -288,7 +295,7 @@ typedef enum {
* highest index...
*
* Remember to update description in erts_pre_init_process()
- * when adding new flags...
+ * and etp-commands when adding new flags...
*/
typedef enum {
@@ -304,9 +311,9 @@ typedef enum {
ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX,
ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX,
ERTS_SSI_AUX_WORK_MISC_IX,
- ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
ERTS_SSI_AUX_WORK_SET_TMO_IX,
ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
+ ERTS_SSI_AUX_WORK_YIELD_IX,
ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */
@@ -337,12 +344,12 @@ typedef enum {
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX)
#define ERTS_SSI_AUX_WORK_MISC \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX)
-#define ERTS_SSI_AUX_WORK_PENDING_EXITERS \
- (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX)
#define ERTS_SSI_AUX_WORK_SET_TMO \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX)
+#define ERTS_SSI_AUX_WORK_YIELD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_YIELD_IX)
#define ERTS_SSI_AUX_WORK_REAP_PORTS \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX)
#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \
@@ -350,20 +357,18 @@ typedef enum {
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-#ifdef ERTS_DIRTY_SCHEDULERS
typedef struct {
- erts_smp_spinlock_t lock;
+ erts_spinlock_t lock;
ErtsSchedulerSleepInfo *list;
} ErtsSchedulerSleepList;
-#endif
struct ErtsSchedulerSleepInfo_ {
-#ifdef ERTS_SMP
+ struct ErtsSchedulerData_ *esdp;
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
erts_tse_t *event;
-#endif
+ struct erts_poll_thread *psi;
erts_atomic32_t aux_work;
};
@@ -396,12 +401,21 @@ typedef struct {
Process* last;
} ErtsRunPrioQueue;
+typedef enum {
+ ERTS_SCHED_NORMAL = 0,
+ ERTS_SCHED_DIRTY_CPU = 1,
+ ERTS_SCHED_DIRTY_IO = 2,
+
+ ERTS_SCHED_TYPE_FIRST = ERTS_SCHED_NORMAL,
+ ERTS_SCHED_TYPE_LAST = ERTS_SCHED_DIRTY_IO
+} ErtsSchedType;
+
typedef struct ErtsSchedulerData_ ErtsSchedulerData;
typedef struct ErtsRunQueue_ ErtsRunQueue;
typedef struct {
- erts_smp_atomic32_t len;
+ erts_atomic32_t len;
erts_aint32_t max_len;
int reds;
} ErtsRunQueueInfo;
@@ -412,7 +426,6 @@ typedef struct {
# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
#endif
-#ifdef ERTS_SMP
#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
@@ -455,36 +468,29 @@ struct ErtsMigrationPaths_ {
ErtsMigrationPath mpath[1];
};
-#endif /* ERTS_SMP */
struct ErtsRunQueue_ {
int ix;
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
+ erts_mtx_t mtx;
+ erts_cnd_t cnd;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
ErtsSchedulerSleepList sleepers;
-#endif
-#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
int check_balance_reds;
int full_reds_history_sum;
int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE];
int out_of_work_count;
erts_aint32_t max_len;
- erts_smp_atomic32_t len;
+ erts_atomic32_t len;
int wakeup_other;
int wakeup_other_reds;
- int halt_in_progress;
struct {
- ErtsProcList *pending_exiters;
Uint context_switches;
Uint reductions;
@@ -498,7 +504,7 @@ struct ErtsRunQueue_ {
struct {
ErtsMiscOpList *start;
ErtsMiscOpList *end;
- erts_smp_atomic_t evac_runq;
+ erts_atomic_t evac_runq;
} misc;
struct {
@@ -511,16 +517,14 @@ struct ErtsRunQueue_ {
#endif
};
-#ifdef ERTS_SMP
extern long erts_runq_supervision_interval;
-#endif
typedef union {
ErtsRunQueue runq;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunQueue))];
} ErtsAlignedRunQueue;
-extern ErtsAlignedRunQueue *erts_aligned_run_queues;
+extern ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
#define ERTS_PROC_REDUCTIONS_EXECUTED(SD, RQ, PRIO, REDS, AREDS)\
do { \
@@ -540,13 +544,15 @@ do { \
} while (0)
typedef struct {
- int need; /* "+sbu true" or scheduler_wall_time enabled */
+ union {
+ erts_atomic32_t mod; /* on dirty schedulers */
+ int need; /* "+sbu true" or scheduler_wall_time enabled */
+ } u;
int enabled;
Uint64 start;
struct {
Uint64 total;
Uint64 start;
- int currently;
} working;
} ErtsSchedWallTime;
@@ -559,17 +565,12 @@ typedef struct {
int sched_id;
ErtsSchedulerData *esdp;
ErtsSchedulerSleepInfo *ssi;
-#ifdef ERTS_SMP
ErtsThrPrgrVal current_thr_prgr;
ErtsThrPrgrVal latest_wakeup;
-#endif
struct {
int ix;
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_prgr;
-#endif
} misc;
-#ifdef ERTS_SMP
struct {
ErtsThrPrgrVal thr_prgr;
} dd;
@@ -582,24 +583,22 @@ typedef struct {
ErtsThrPrgrLaterOp *first;
ErtsThrPrgrLaterOp *last;
} later_op;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
struct {
-#ifdef ERTS_SMP
int need_thr_prgr;
ErtsThrPrgrVal thr_prgr;
-#endif
void *queue;
} async_ready;
-#endif
-#ifdef ERTS_SMP
struct {
Uint64 next;
int *sched2jix;
int jix;
ErtsDelayedAuxWorkWakeupJob *job;
} delayed_wakeup;
-#endif
+ struct {
+ ErtsAlcuBlockscanYieldData alcu_blockscan;
+ ErtsEtsAllYieldData ets_all;
+ /* Other yielding operations... */
+ } yield;
struct {
struct {
erts_aint32_t flags;
@@ -609,20 +608,15 @@ typedef struct {
} debug;
} ErtsAuxWorkData;
-#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) \
+ (&(ESDP)->aux_work_data.yield.NAME)
+void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp);
+
typedef enum {
ERTS_DIRTY_CPU_SCHEDULER,
ERTS_DIRTY_IO_SCHEDULER
} ErtsDirtySchedulerType;
-typedef union {
- struct {
- ErtsDirtySchedulerType type: 1;
- Uint num: sizeof(Uint)*8 - 1;
- } s;
- Uint no;
-} ErtsDirtySchedId;
-#endif
struct ErtsSchedulerData_ {
/*
@@ -636,20 +630,18 @@ struct ErtsSchedulerData_ {
ErtsTimerWheel *timer_wheel;
ErtsNextTimeoutRef next_tmo_ref;
ErtsHLTimerService *timer_service;
-#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
Process *free_process;
ErtsThrPrgrData thr_progress_data;
-#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
+ ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
+ Uint dirty_no; /* Scheduler number for dirty schedulers */
+ struct enif_environment_t *current_nif;
Process *dirty_shadow_process;
-#endif
Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
@@ -670,12 +662,19 @@ struct ErtsSchedulerData_ {
Uint64 out;
Uint64 in;
} io;
+ struct {
+ ErtsSignal* sig;
+ Eterm to;
+#ifdef DEBUG
+ Process* dbg_from;
+#endif
+ } pending_signal;
Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
ErtsGCInfo gc_info;
ErtsPortTaskHandle nosuspend_port_task_handle;
-
+ ErtsEtsTables ets_tables;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -687,26 +686,24 @@ typedef union {
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerData))];
} ErtsAlignedSchedulerData;
-extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
-extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
-#endif
+extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data);
+extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data);
+extern ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data);
-#ifndef ERTS_SMP
-extern ErtsSchedulerData *erts_scheduler_data;
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_runq_is_locked(ErtsRunQueue *);
#endif
+void
+erts_debug_later_op_foreach(void (*callback)(void*),
+ void (*func)(void *, ErtsThrPrgrVal, void *),
+ void *arg);
+
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
void erts_empty_runq(ErtsRunQueue *rq);
void erts_non_empty_runq(ErtsRunQueue *rq);
-#endif
/*
@@ -714,86 +711,84 @@ void erts_non_empty_runq(ErtsRunQueue *rq);
* other threads peek at values without run queue lock.
*/
-ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
-ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
-ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi);
+ERTS_GLB_INLINE void erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_dirty(&rq->len);
+ len = erts_atomic32_read_dirty(&rq->len);
-#ifdef ERTS_SMP
if (len == 0)
erts_non_empty_runq(rq);
-#endif
len++;
if (rq->max_len < len)
rq->max_len = len;
ASSERT(len > 0);
- erts_smp_atomic32_set_nob(&rq->len, len);
+ erts_atomic32_set_nob(&rq->len, len);
- len = erts_smp_atomic32_read_dirty(&rqi->len);
+ len = erts_atomic32_read_dirty(&rqi->len);
ASSERT(len >= 0);
if (len == 0) {
- ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ ASSERT((erts_atomic32_read_nob(&rq->flags)
& ((erts_aint32_t) (1 << prio))) == 0);
- erts_smp_atomic32_read_bor_nob(&rq->flags,
+ erts_atomic32_read_bor_nob(&rq->flags,
(erts_aint32_t) (1 << prio));
}
len++;
if (rqi->max_len < len)
rqi->max_len = len;
- erts_smp_atomic32_set_relb(&rqi->len, len);
+ erts_atomic32_set_relb(&rqi->len, len);
}
ERTS_GLB_INLINE void
-erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_dirty(&rq->len);
+ len = erts_atomic32_read_dirty(&rq->len);
len--;
ASSERT(len >= 0);
- erts_smp_atomic32_set_nob(&rq->len, len);
+ erts_atomic32_set_nob(&rq->len, len);
- len = erts_smp_atomic32_read_dirty(&rqi->len);
+ len = erts_atomic32_read_dirty(&rqi->len);
len--;
ASSERT(len >= 0);
if (len == 0) {
- ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ ASSERT((erts_atomic32_read_nob(&rq->flags)
& ((erts_aint32_t) (1 << prio))));
- erts_smp_atomic32_read_band_nob(&rq->flags,
+ erts_atomic32_read_band_nob(&rq->flags,
~((erts_aint32_t) (1 << prio)));
}
- erts_smp_atomic32_set_relb(&rqi->len, len);
+ erts_atomic32_set_relb(&rqi->len, len);
}
ERTS_GLB_INLINE void
-erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
+erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
{
erts_aint32_t len;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- len = erts_smp_atomic32_read_dirty(&rqi->len);
+ len = erts_atomic32_read_dirty(&rqi->len);
ASSERT(rqi->max_len >= len);
rqi->max_len = len;
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X))
+#define RUNQ_READ_LEN(X) erts_atomic32_read_nob((X))
#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
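The length accounting above splits responsibilities: only the run-queue lock holder mutates len, so it may read it with a relaxed "dirty" load, but it publishes updates with a release store (set_relb) so that threads polling the counter without the lock (RUNQ_READ_LEN) observe sane values. The shape of that writer/reader split, as a hedged C11 sketch:

    #include <stdatomic.h>

    static atomic_int len;              /* written only under the lock */

    void locked_inc(void)               /* caller holds the queue lock */
    {
        int v = atomic_load_explicit(&len, memory_order_relaxed);
        atomic_store_explicit(&len, v + 1, memory_order_release);
    }

    int lockless_peek(void)             /* any thread, no lock held */
    {
        return atomic_load_explicit(&len, memory_order_relaxed);
    }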
@@ -809,14 +804,18 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_CALL_TIME_BP 3
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
+#define ERTS_PSD_ETS_OWNED_TABLES 6
+#define ERTS_PSD_ETS_FIXED_TABLES 7
+#define ERTS_PSD_DIST_ENTRY 8
+#define ERTS_PSD_PENDING_SUSPEND 9
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 10 /* keep last... */
-#define ERTS_PSD_SIZE 7
+#define ERTS_PSD_SIZE 11
#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 6
+# define ERTS_PSD_SIZE 10
#endif
typedef struct {
@@ -841,8 +840,20 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ((ErtsProcLocks) 0)
-#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ((ErtsProcLocks) 0)
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS ERTS_PROC_LOCK_STATUS
+#define ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_PENDING_SUSPEND_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -858,7 +869,7 @@ extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#define ERTS_SCHED_STAT_MODIFY_CLEAR 3
typedef struct {
- erts_smp_spinlock_t lock;
+ erts_spinlock_t lock;
int enabled;
struct {
Eterm name;
@@ -879,22 +890,6 @@ typedef struct {
typedef struct ErtsProcSysTask_ ErtsProcSysTask;
typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
-#ifdef ERTS_SMP
-
-typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
-struct ErtsPendingSuspend_ {
- ErtsPendingSuspend *next;
- ErtsPendingSuspend *end;
- Eterm pid;
- void (*handle_func)(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm pid);
-};
-
-#endif
-
-
/* Defines to ease the change of memory architecture */
# define HEAP_START(p) (p)->heap
# define HEAP_TOP(p) (p)->htop
@@ -945,9 +940,11 @@ struct process {
Eterm* stop; /* Stack top */
Eterm* heap; /* Heap start */
Eterm* hend; /* Heap end */
+ Eterm* abandoned_heap;
Uint heap_sz; /* Size of heap in words */
Uint min_heap_size; /* Minimum size of heap (in words). */
Uint min_vheap_size; /* Minimum size of virtual heap (in words). */
+ Uint max_heap_size; /* Maximum size of heap (in words). */
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
volatile unsigned long fp_exception;
@@ -960,16 +957,6 @@ struct process {
#endif
/*
- * Moved to after "struct hipe_process_state hipe", as a temporary fix for
- * LLVM hard-coding offsetof(struct process, hipe.nstack) (sic!)
- * (see void X86FrameLowering::adjustForHiPEPrologue(...) in
- * lib/Target/X86/X86FrameLowering.cpp).
- *
- * Used to be below "Eterm* hend".
- */
- Eterm* abandoned_heap;
-
- /*
* Saved x registers.
*/
Uint arity; /* Number of live argument registers (only valid
@@ -997,18 +984,8 @@ struct process {
Process *next; /* Pointer to next process in run queue */
- struct ErtsNodesMonitor_ *nodes_monitors;
-
- ErtsSuspendMonitor *suspend_monitors; /* Processes suspended by
- this process via
- erlang:suspend_process/1 */
-
- ErlMessageQueue msg; /* Message queue */
-
+ ErtsSignalPrivQueues sig_qs; /* Signal queues */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */
-#endif
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -1022,20 +999,20 @@ struct process {
#endif
union {
void *terminate;
- BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
- of pointer to funcinfo instruction, hence the BeamInstr datatype */
+ ErtsCodeMFA initial; /* Initial module(0), function(1), arity(2),
+ often used instead of pointer to funcinfo
+ instruction. */
} u;
- BeamInstr* current; /* Current Erlang function, part of the funcinfo:
+ ErtsCodeMFA* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
- * arity an untagged integer). BeamInstr * because it references code
+ * arity an untagged integer).
*/
-
+
/*
* Information mainly for post-mortem use (erl crash dump).
*/
Eterm parent; /* Pid of process that created this process. */
- erts_approx_time_t approx_started; /* Time when started. */
Uint32 static_flags; /* Flags that do *not* change */
@@ -1047,7 +1024,6 @@ struct process {
Eterm *old_hend; /* Heap pointers for generational GC. */
Eterm *old_htop;
Eterm *old_heap;
- Uint max_heap_size; /* Maximum size of heap (in words). */
Uint16 gen_gcs; /* Number of (minor) generational GCs. */
Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
@@ -1055,32 +1031,23 @@ struct process {
ErlHeapFragment* live_hf_end;
ErtsMessage *msg_frag; /* Pointer to message fragment list */
Uint mbuf_sz; /* Total size of heap fragments and message fragments */
- erts_smp_atomic_t psd; /* Rarely used process specific data */
+ erts_atomic_t psd; /* Rarely used process specific data */
Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
ErtsProcSysTaskQs *sys_task_qs;
+ ErtsProcSysTask *dirty_sys_tasks;
- erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */
-#endif
+ erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
+ erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */
-#ifdef ERTS_SMP
- ErlMessageInQueue msg_inq;
+ ErtsSignalInQueue sig_inq;
ErlTraceMessageQueue *trace_msg_q;
- ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
- Eterm suspendee;
- ErtsPendingSuspend *pending_suspenders;
- erts_smp_atomic_t run_queue;
-#ifdef HIPE
- struct hipe_process_state_smp hipe_smp;
-#endif
-#endif
+ erts_atomic_t run_queue;
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
@@ -1106,6 +1073,7 @@ struct process {
#endif
};
+extern Eterm erts_init_process_id; /* pid of init process */
extern const Process erts_invalid_process;
#ifdef CHECK_FOR_HOLES
@@ -1182,20 +1150,20 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_IN_PRQ_LOW ERTS_PSFLG_BIT(3)
#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(4)
#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(5)
-#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(6)
+#define ERTS_PSFLG_UNUSED ERTS_PSFLG_BIT(6)
#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(7)
#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(8)
#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9)
#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10)
#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11)
-#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12)
-#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13)
+#define ERTS_PSFLG_SYS_TASKS ERTS_PSFLG_BIT(12)
+#define ERTS_PSFLG_SIG_IN_Q ERTS_PSFLG_BIT(13)
#define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14)
#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
#define ERTS_PSFLG_OFF_HEAP_MSGQ ERTS_PSFLG_BIT(18)
-#define ERTS_PSFLG_ON_HEAP_MSGQ ERTS_PSFLG_BIT(19)
+#define ERTS_PSFLG_SIG_Q ERTS_PSFLG_BIT(19)
#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20)
#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21)
#define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22)
@@ -1214,7 +1182,6 @@ void erts_check_for_holes(Process* p);
| ERTS_PSFLG_IN_PRQ_LOW)
#define ERTS_PSFLGS_VOLATILE_HEAP (ERTS_PSFLG_EXITING \
- | ERTS_PSFLG_PENDING_EXIT \
| ERTS_PSFLG_DIRTY_RUNNING \
| ERTS_PSFLG_DIRTY_RUNNING_SYS)
@@ -1225,7 +1192,6 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Flags in the dirty_state field.
@@ -1252,7 +1218,6 @@ void erts_check_for_holes(Process* p);
| ERTS_PDSFLG_IN_CPU_PRQ_HIGH \
| ERTS_PDSFLG_IN_CPU_PRQ_NORMAL\
| ERTS_PDSFLG_IN_CPU_PRQ_LOW)
-#endif
/*
@@ -1281,7 +1246,24 @@ void erts_check_for_holes(Process* p);
#define SEQ_TRACE_T_SENDER(token) (*(tuple_val(token) + 4))
#define SEQ_TRACE_T_LASTCNT(token) (*(tuple_val(token) + 5))
+#ifdef USE_VM_PROBES
+/* The dtrace probe for seq_trace only supports 'int' labels, so we represent
+ * all values that won't fit into a 32-bit signed integer as ERTS_SINT32_MIN
+ * (bigints, tuples, etc). */
+
+#define SEQ_TRACE_T_DTRACE_LABEL(token) \
+ DTRACE_SEQ_TRACE_LABEL__(SEQ_TRACE_T_LABEL(token))
+
+#define DTRACE_SEQ_TRACE_LABEL__(label_term) \
+ (is_small((label_term)) ? \
+ ((signed_val((label_term)) <= ERTS_SINT32_MAX && \
+ signed_val((label_term)) >= ERTS_SINT32_MIN) ? \
+ signed_val((label_term)) : ERTS_SINT32_MIN) \
+ : ERTS_SINT32_MIN)
+#endif
+
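DTRACE_SEQ_TRACE_LABEL__ folds an arbitrary Erlang term into the probe's int: in-range small integers pass through, anything else (bignums, tuples, out-of-range smalls) collapses to ERTS_SINT32_MIN. The same decision written out as a function for readability (a sketch using the macro's own primitives):

    static int dtrace_seq_trace_label(Eterm label_term)
    {
        if (is_small(label_term)) {
            Sint v = signed_val(label_term);
            if (ERTS_SINT32_MIN <= v && v <= ERTS_SINT32_MAX)
                return (int) v;          /* fits the probe's int label */
        }
        return ERTS_SINT32_MIN;          /* sentinel for everything else */
    }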
/*
+
* Possible flags for the flags field in ErlSpawnOpts below.
*/
@@ -1292,7 +1274,7 @@ void erts_check_for_holes(Process* p);
#define SPO_OFF_HEAP_MSGQ 16
#define SPO_ON_HEAP_MSGQ 32
-extern int erts_default_spo_flags;
+extern int ERTS_WRITE_UNLIKELY(erts_default_spo_flags);
/*
* The following struct contains options for a process to be spawned.
@@ -1343,15 +1325,15 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
+extern erts_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
** erts_system_monitor must be != NIL, to allow testing on just
** the erts_system_monitor_* variables.
*/
-extern Eterm erts_system_monitor;
-extern Uint erts_system_monitor_long_gc;
-extern Uint erts_system_monitor_long_schedule;
-extern Uint erts_system_monitor_large_heap;
+extern Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor);
+extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_gc);
+extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_schedule);
+extern Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_large_heap);
struct erts_system_monitor_flags_t {
unsigned int busy_port : 1;
unsigned int busy_dist_port : 1;
@@ -1382,18 +1364,29 @@ extern int erts_system_profile_ts_type;
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
-#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
+#define F_UNUSED (1 << 9)
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
-#define F_ON_HEAP_MSGQ (1 << 13) /* Off heap msg queue */
+#define F_ON_HEAP_MSGQ (1 << 13) /* On heap msg queue */
#define F_OFF_HEAP_MSGQ_CHNG (1 << 14) /* Off heap msg queue changing */
#define F_ABANDONED_HEAP_USE (1 << 15) /* Have usage of abandoned heap */
#define F_DELAY_GC (1 << 16) /* Similar to disable GC (see below) */
#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
-#define F_HIPE_MODE (1 << 19)
+#define F_HIPE_MODE (1 << 19) /* Process is executing in HiPE mode */
#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
+#define F_DIRTY_CLA (1 << 21) /* Dirty copy literal area scheduled */
+#define F_DIRTY_GC_HIBERNATE (1 << 22) /* Dirty GC hibernate scheduled */
+#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
+#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
+#define F_HIBERNATED (1 << 25) /* Hibernated */
+#define F_LOCAL_SIGS_ONLY (1 << 26) /* Handle privq sigs only */
+#define F_TRAP_EXIT (1 << 27) /* Trapping exit */
+#define F_DEFERRED_SAVED_LAST (1 << 28) /* Deferred sig_qs.saved_last */
+#define F_DELAYED_PSIGQS_LEN (1 << 29) /* Delayed update of sig_qs.len */
+#define F_HIPE_RECV_LOCKED (1 << 30) /* HiPE message queue locked */
+#define F_HIPE_RECV_YIELD (1 << 31) /* HiPE receive yield */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1459,7 +1452,7 @@ extern int erts_system_profile_ts_type;
| F_TRACE_ARITY_ONLY | F_TRACE_RETURN_TO \
| F_TRACE_SILENT | F_TRACE_SCHED_PROCS | F_TRACE_PORTS \
| F_TRACE_SCHED_PORTS | F_TRACE_SCHED_NO \
- | F_TRACE_SCHED_EXIT)
+ | F_TRACE_SCHED_EXIT )
#define ERTS_TRACEE_MODIFIER_FLAGS \
@@ -1472,6 +1465,14 @@ extern int erts_system_profile_ts_type;
#define SEQ_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
+#define ERTS_SIG_ENABLE_TRACE_FLAGS \
+ ( F_TRACE_RECEIVE | F_TRACE_PROCS)
+
+/*
+ * F_TRACE_RECEIVE is always enabled/disabled via signaling.
+ * F_TRACE_PROCS enables/disables F_TRACE_PROCS_SIG via signaling.
+ */
+
/* Sequential trace flags */
/* SEQ_TRACE_TIMESTAMP_MASK is a bit-field */
@@ -1498,10 +1499,6 @@ extern int erts_system_profile_ts_type;
#define DT_UTAG_FLAGS(P) ((P)->dt_utag_flags)
#endif
-/* Option flags to erts_send_exit_signal() */
-#define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0)
-#define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1)
-
#define CANCEL_TIMER(P) \
do { \
if ((P)->flags & (F_INSLPQUEUE|F_TIMO)) { \
@@ -1512,84 +1509,58 @@ extern int erts_system_profile_ts_type;
} \
} while (0)
-#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
-#define ERTS_NUM_DIRTY_RUNQS 2
-#else
-#define ERTS_NUM_DIRTY_RUNQS 0
-#endif
+#define ERTS_NUM_DIRTY_CPU_RUNQS 1
+#define ERTS_NUM_DIRTY_IO_RUNQS 1
+
+#define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS)
#define ERTS_RUNQ_IX(IX) \
- (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues), \
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
&erts_aligned_run_queues[(IX)].runq)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_RUNQ_IX_IS_DIRTY(IX) \
- (-(ERTS_NUM_DIRTY_RUNQS) <= (IX) && (IX) < 0)
+ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
+ (erts_no_run_queues <= (IX)))
#define ERTS_DIRTY_RUNQ_IX(IX) \
(ASSERT(ERTS_RUNQ_IX_IS_DIRTY(IX)), \
&erts_aligned_run_queues[(IX)].runq)
-#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq)
-#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq)
-#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1)
-#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2)
-#else
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#endif
+#define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[erts_no_run_queues].runq)
+#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq)
+#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ)
+#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ)
#define ERTS_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \
&erts_aligned_scheduler_data[(IX)].esd)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \
&erts_aligned_dirty_cpu_scheduler_data[(IX)].esd)
#define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \
&erts_aligned_dirty_io_scheduler_data[(IX)].esd)
-#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \
- ((ESDP)->dirty_no.s.num)
-#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \
- ((ESDP)->dirty_no.s.type)
-#ifdef ERTS_SMP
#define ERTS_SCHEDULER_IS_DIRTY(ESDP) \
- ((ESDP)->dirty_no.s.num != 0)
+ ((ESDP)->type != ERTS_SCHED_NORMAL)
#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \
- (ERTS_SCHEDULER_IS_DIRTY((ESDP)) & ((ESDP)->dirty_no.s.type == 0))
+ ((ESDP)->type == ERTS_SCHED_DIRTY_CPU)
#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
- (ERTS_SCHEDULER_IS_DIRTY((ESDP)) & ((ESDP)->dirty_no.s.type == 1))
-#else
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
-#else
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
+ ((ESDP)->type == ERTS_SCHED_DIRTY_IO)
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int
-#ifdef ERTS_DIRTY_SCHEDULERS
- , int, int, int
-#endif
- );
-
+void erts_init_scheduling(int, int, int, int, int, int);
+void erts_execute_dirty_system_task(Process *c_p);
int erts_set_gc_state(Process *c_p, int enable);
-Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
+ int dirty_cpu, int want_dirty_io);
Eterm erts_system_check_request(Process *c_p);
Eterm erts_gc_info_request(Process *c_p);
Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
-int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
-void erts_destroy_nif_export(void *); /* see erl_nif.c */
-
ErtsProcList *erts_proclist_create(Process *);
ErtsProcList *erts_proclist_copy(ErtsProcList *);
void erts_proclist_destroy(ErtsProcList *);
+void erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList*);
ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *);
@@ -1682,7 +1653,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
@@ -1757,9 +1728,9 @@ ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list,
#endif
-int erts_sched_set_wakeup_other_thresold(char *str);
-int erts_sched_set_wakeup_other_type(char *str);
-int erts_sched_set_busy_wait_threshold(char *str);
+int erts_sched_set_wakeup_other_threshold(ErtsSchedType sched_type, char *str);
+int erts_sched_set_wakeup_other_type(ErtsSchedType sched_type, char *str);
+int erts_sched_set_busy_wait_threshold(ErtsSchedType sched_type, char *str);
int erts_sched_set_wake_cleanup_threshold(char *);
void erts_schedule_thr_prgr_later_op(void (*)(void *),
@@ -1770,17 +1741,17 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
ErtsThrPrgrLaterOp *,
UWord);
void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+struct db_fixation;
+void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*);
void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc);
int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
+int erts_sig_prio(Eterm pid, int prio);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
#endif
-#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS)
void
erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *);
-#endif
-#ifdef ERTS_SMP
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
@@ -1795,14 +1766,9 @@ void erts_start_schedulers(void);
void erts_alloc_notify_delayed_dealloc(int);
void erts_alloc_ensure_handle_delayed_dealloc_call(int);
void erts_notify_canceled_timer(ErtsSchedulerData *, int);
-#endif
-#if ERTS_USE_ASYNC_READY_Q
void erts_notify_check_async_ready_queue(void *);
-#endif
-#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
void erts_notify_finish_breakpointing(Process* p);
-#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
void *arg);
@@ -1811,12 +1777,13 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
void (*func)(void *),
void *arg);
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
+void erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
Eterm erts_process_state2status(erts_aint32_t);
Eterm erts_process_status(Process *, Eterm);
-Uint erts_run_queues_len(Uint *, int, int);
+Uint erts_run_queues_len(Uint *, int, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
@@ -1827,8 +1794,10 @@ ErtsRunQueue *erts_schedid2runq(Uint);
Process *erts_schedule(ErtsSchedulerData *, Process*, int);
void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
+void erts_set_self_exiting(Process *, Eterm);
void erts_do_exit_process(Process*, Eterm);
void erts_continue_exit_process(Process *);
+void erts_proc_exit_link(Process *, ErtsLink *, Uint16, Eterm, Eterm);
/* Begin System profile */
Uint erts_runnable_process_count(void);
/* End System profile */
@@ -1837,14 +1806,26 @@ void erts_cleanup_empty_process(Process* p);
#ifdef DEBUG
void erts_debug_verify_clean_empty_process(Process* p);
#endif
-void erts_stack_dump(int to, void *to_arg, Process *);
-void erts_limited_stack_trace(int to, void *to_arg, Process *);
-void erts_program_counter_info(int to, void *to_arg, Process *);
-void erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp);
-void erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg);
-void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg);
-
-Eterm erts_get_process_priority(Process *p);
+void erts_stack_dump(fmtfn_t to, void *to_arg, Process *);
+void erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *);
+void erts_program_counter_info(fmtfn_t to, void *to_arg, Process *);
+void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_print_run_queue_info(fmtfn_t, void *to_arg, ErtsRunQueue*);
+void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
+void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
+Eterm erts_process_info(Process *c_p, ErtsHeapFactory *hfact,
+ Process *rp, ErtsProcLocks rp_locks,
+ int *item_ix, int item_ix_len,
+ int flags, Uint reserve_size, Uint *reds);
+
+typedef struct {
+ Process *c_p;
+ Eterm reason;
+} ErtsProcExitContext;
+void erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt);
+void erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt);
+
+Eterm erts_get_process_priority(erts_aint32_t state);
Eterm erts_set_process_priority(Process *p, Eterm prio);
Uint erts_get_total_context_switches(void);
@@ -1862,23 +1843,7 @@ void erts_suspend(Process*, ErtsProcLocks, Port*);
void erts_resume(Process*, ErtsProcLocks);
int erts_resume_processes(ErtsProcList *);
-int erts_send_exit_signal(Process *,
- Eterm,
- Process *,
- ErtsProcLocks *,
- Eterm,
- Eterm,
- Process *,
- Uint32);
-#ifdef ERTS_SMP
-void erts_handle_pending_exit(Process *, ErtsProcLocks);
-#define ERTS_PROC_PENDING_EXIT(P) \
- (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state))
-#else
-#define ERTS_PROC_PENDING_EXIT(P) 0
-#endif
-
-void erts_deep_process_dump(int, void *);
+void erts_deep_process_dump(fmtfn_t, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
@@ -1890,7 +1855,7 @@ Uint erts_debug_nbalance(void);
int erts_debug_wait_completed(Process *c_p, int flags);
-Uint erts_process_memory(Process *c_p, int incl_msg_inq);
+Uint erts_process_memory(Process *c_p, int include_sigs_in_transit);
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
@@ -1906,49 +1871,68 @@ do { \
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP)
#endif
-#if defined(ERTS_SMP) || defined(USE_THREADS)
ErtsSchedulerData *erts_get_scheduler_data(void);
-#else
-ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE
-ErtsSchedulerData *erts_get_scheduler_data(void)
-{
- return erts_scheduler_data;
-}
-#endif
-#endif
void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
+erts_aint32_t erts_proc_sys_schedule(Process *p, erts_aint32_t state,
+ erts_aint32_t enable_flag);
ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
+ERTS_GLB_INLINE void erts_schedule_dirty_sys_execution(Process *c_p);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
erts_schedule_process(p, state, locks);
}
+
+ERTS_GLB_INLINE void
+erts_schedule_dirty_sys_execution(Process *c_p)
+{
+ erts_aint32_t a, n, e;
+
+ a = erts_atomic32_read_nob(&c_p->state);
+
+ /*
+ * Only a currently executing process schedules
+ * itself for dirty-sys execution...
+ */
+
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+ /* Don't set dirty-active-sys if we are about to exit... */
+
+ while (!(a & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_EXITING))) {
+ e = a;
+ n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break; /* dirty-active-sys set */
+ }
+}
+
#endif
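The loop in erts_schedule_dirty_sys_execution is the usual set-flag-unless-vetoed CAS idiom: give up if the flag is already set or a disqualifying flag (here EXITING) appears, otherwise attempt the swap and retry against the freshly observed state. Its generic shape in C11 (a sketch, not the ERTS primitive set):

    #include <stdatomic.h>

    /* Set `want` in *state unless `veto` is (or becomes) set.
     * Returns non-zero if `want` ended up set. */
    static int set_flag_unless(atomic_uint *state,
                               unsigned want, unsigned veto)
    {
        unsigned a = atomic_load(state);
        while (!(a & (want | veto))) {
            /* On failure, `a` is reloaded with the current value. */
            if (atomic_compare_exchange_weak(state, &a, a | want))
                return 1;
        }
        return (a & want) != 0;
    }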
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
#define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
#include "erl_process_lock.h"
#undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
-#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \
+#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) \
do { \
if ((L)) \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \
+ ERTS_LC_ASSERT(erts_lc_runq_is_locked((RQ))); \
else \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \
+ ERTS_LC_ASSERT(!erts_lc_runq_is_locked((RQ))); \
} while (0)
#else
-#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L)
+#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L)
#endif
void *erts_psd_set_init(Process *p, int ix, void *data);
@@ -1964,22 +1948,22 @@ ERTS_GLB_INLINE void *
erts_psd_get(Process *p, int ix)
{
ErtsPSD *psd;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].get_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
+ ERTS_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
|| erts_thr_progress_is_blocking());
}
#endif
- psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
if (!psd)
return NULL;
- ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+ ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
return psd->data[ix];
}
@@ -1987,30 +1971,28 @@ ERTS_GLB_INLINE void *
erts_psd_set(Process *p, int ix, void *data)
{
ErtsPSD *psd;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
- erts_aint32_t state = state = erts_smp_atomic32_read_nob(&p->state);
+    erts_aint32_t state = erts_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_FREE)) {
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].set_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
+ ERTS_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
|| erts_thr_progress_is_blocking());
}
}
#endif
- psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
+ psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd);
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
if (psd) {
void *old;
-#ifdef ERTS_SMP
#ifdef ETHR_ORDERED_READ_DEPEND
ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
#else
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
#endif
-#endif
old = psd->data[ix];
psd->data[ix] = data;
return old;
@@ -2047,6 +2029,16 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \
erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
+#define ERTS_PROC_GET_DIST_ENTRY(P) \
+ ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY))
+#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \
+ ((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE)))
+
+#define ERTS_PROC_GET_PENDING_SUSPEND(P) \
+ ((void *) erts_psd_get((P), ERTS_PSD_PENDING_SUSPEND))
+#define ERTS_PROC_SET_PENDING_SUSPEND(P, PS) \
+ ((void *) erts_psd_set((P), ERTS_PSD_PENDING_SUSPEND, (void *) (PS)))
+
#ifdef HIPE
#define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
@@ -2090,7 +2082,6 @@ erts_proc_set_error_handler(Process *p, Eterm handler)
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
#include "erl_thr_progress.h"
@@ -2192,7 +2183,6 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
#endif
-#endif
#endif
@@ -2201,15 +2191,20 @@ ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE Process *erts_get_current_process(void);
ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
ERTS_GLB_INLINE Uint erts_get_scheduler_id(void);
-ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p);
+ERTS_GLB_INLINE void erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd);
+ERTS_GLB_INLINE ErtsRunQueue *erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *boundp);
+ERTS_GLB_INLINE int erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq);
+ERTS_GLB_INLINE ErtsRunQueue *erts_bind_runq_proc(Process *p, int bind);
+ERTS_GLB_INLINE int erts_proc_runq_is_bound(Process *p);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p, int *boundp);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp);
-ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq);
-ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq);
-ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq);
-ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
-ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
-ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
-ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_runq_unlock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+ERTS_GLB_INLINE void erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap_state(Process *pp,
erts_aint32_t *psp,
@@ -2234,11 +2229,7 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
{
ErtsSchedulerData *esdp;
ASSERT(c_p);
-#if !defined(ERTS_SMP)
- esdp = erts_get_scheduler_data();
-#else
esdp = c_p->scheduler_data;
-# if defined(ERTS_DIRTY_SCHEDULERS)
if (esdp) {
ASSERT(esdp == erts_get_scheduler_data());
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
@@ -2248,8 +2239,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
ASSERT(esdp);
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-# endif
-#endif
ASSERT(esdp);
return esdp;
}
@@ -2280,124 +2269,229 @@ Eterm erts_get_current_pid(void)
ERTS_GLB_INLINE
Uint erts_get_scheduler_id(void)
{
-#ifdef ERTS_SMP
ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
return 0;
else
-#endif
return esdp ? esdp->no : (Uint) 0;
-#else
- return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0;
-#endif
}
+/**
+ * Init run-queue of process.
+ *
+ * @param p[in,out] Process
+ * @param rq[in] Run-queue that process will be assigned to
+ * @param bnd[in]            If non-zero, binds the process to the run-queue.
+ */
+
+ERTS_GLB_INLINE void
+erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd)
+{
+ erts_aint_t rqint = (erts_aint_t) rq;
+ if (bnd)
+ rqint |= ERTS_RUNQ_BOUND_FLAG;
+ erts_atomic_init_nob(&p->run_queue, rqint);
+}
+
+/**
+ * Forcibly set run-queue of process.
+ *
+ * @param p[in,out] Process
+ * @param rq[in] Run-queue that process will be assigned to
+ * @param bndp[in,out]       Pointer to integer. On input, a non-zero
+ *                           value causes the process to be bound to
+ *                           the run-queue. On output, it indicates
+ *                           whether the process previously was bound
+ *                           or not.
+ * @return Previous run-queue.
+ */
+
ERTS_GLB_INLINE ErtsRunQueue *
-erts_get_runq_proc(Process *p)
+erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *bndp)
{
-#ifdef ERTS_SMP
- ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue));
- return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue);
-#else
- return ERTS_RUNQ_IX(0);
-#endif
+ erts_aint_t rqint = (erts_aint_t) rq;
+ ASSERT(bndp);
+ ASSERT(rq);
+ if (*bndp)
+ rqint |= ERTS_RUNQ_BOUND_FLAG;
+ rqint = erts_atomic_xchg_nob(&p->run_queue, rqint);
+ *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+ return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK);
+}
+
+/**
+ * Try to change run-queue assignment of a process.
+ *
+ * @param p[in,out] Process
+ * @param rq[int] Run-queue that process will be assigned to
+ * @return Non-zero if the run-queue assignment was
+ * successfully changed.
+ */
+
+ERTS_GLB_INLINE int
+erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq)
+{
+ erts_aint_t old_rqint, new_rqint;
+
+ ASSERT(rq);
+
+ new_rqint = (erts_aint_t) rq;
+ old_rqint = (erts_aint_t) erts_atomic_read_nob(&p->run_queue);
+ while (1) {
+ erts_aint_t act_rqint;
+
+ if (old_rqint & ERTS_RUNQ_BOUND_FLAG)
+ return 0;
+
+ act_rqint = erts_atomic_cmpxchg_nob(&p->run_queue,
+ new_rqint,
+ old_rqint);
+ if (act_rqint == old_rqint)
+            return !0;
+        /* CAS failed: retry with the value actually observed, or this
+         * loop would spin forever against a stale snapshot. */
+        old_rqint = act_rqint;
+    }
+}
+
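A typical caller of erts_try_change_runq_proc is migration logic that must respect binding: a zero return means "leave the process where it is". Hypothetical use:

    /* Try to steal p to a less loaded queue; bound processes stay put. */
    if (erts_try_change_runq_proc(p, target_rq)) {
        /* assignment changed; enqueue p on target_rq */
    } else {
        /* p is bound; enqueue it on its own run-queue instead */
    }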
+/**
+ * Bind or unbind a process to/from its currently used run-queue.
+ *
+ * @param p Process
+ * @param bind Bind if non-zero; otherwise unbind
+ * @return Pointer to previously bound run-queue,
+ * or NULL if previously unbound
+ */
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_bind_runq_proc(Process *p, int bind)
+{
+ erts_aint_t rqint;
+ if (bind)
+ rqint = erts_atomic_read_bor_nob(&p->run_queue,
+ ERTS_RUNQ_BOUND_FLAG);
+ else
+ rqint = erts_atomic_read_band_nob(&p->run_queue,
+ ~ERTS_RUNQ_BOUND_FLAG);
+ if (rqint & ERTS_RUNQ_BOUND_FLAG)
+ return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK);
+ else
+ return NULL;
+}
+
+/**
+ * Determine whether a process is bound to a run-queue or not.
+ *
+ * @return              Returns a non-zero value if bound,
+ *                      and zero if not bound.
+ */
+
+ERTS_GLB_INLINE int
+erts_proc_runq_is_bound(Process *p)
+{
+ erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue);
+ return (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+}
+
+/**
+ * Get run-queue of process.
+ *
+ * @param p[in,out]     Process
+ * @param bndp[out]     Pointer to integer. If a non-NULL pointer,
+ *                      the integer will be set to a non-zero
+ *                      value if the process is bound to the
+ *                      run-queue.
+ * @return              Pointer to the normal run-queue that
+ *                      the process currently is assigned to.
+ * A process is always assigned to a
+ * normal run-queue.
+ */
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_get_runq_proc(Process *p, int *bndp)
+{
+ erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue);
+ ErtsRunQueue *rq;
+ if (bndp)
+ *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+ rqint &= ERTS_RUNQ_POINTER_MASK;
+ rq = (ErtsRunQueue *) rqint;
+ ASSERT(rq);
+ return rq;
}
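Taken together, init/set/bind/try_change/get form a small API around the tagged run_queue word. A hypothetical round trip (queue choice and binding policy made up for illustration):

    int bound;
    ErtsRunQueue *rq;

    erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);  /* start unbound on rq 0 */
    erts_bind_runq_proc(p, 1);                   /* pin p to its queue */

    rq = erts_get_runq_proc(p, &bound);          /* rq: queue 0; bound != 0 */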
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_current(ErtsSchedulerData *esdp)
{
ASSERT(!esdp || esdp == erts_get_scheduler_data());
-#ifdef ERTS_SMP
if (!esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_runq_lock(ErtsRunQueue *rq)
+erts_runq_lock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&rq->mtx);
-#endif
+ erts_mtx_lock(&rq->mtx);
}
ERTS_GLB_INLINE int
-erts_smp_runq_trylock(ErtsRunQueue *rq)
+erts_runq_trylock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- return erts_smp_mtx_trylock(&rq->mtx);
-#else
- return 0;
-#endif
+ return erts_mtx_trylock(&rq->mtx);
}
ERTS_GLB_INLINE void
-erts_smp_runq_unlock(ErtsRunQueue *rq)
+erts_runq_unlock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&rq->mtx);
-#endif
+ erts_mtx_unlock(&rq->mtx);
}
ERTS_GLB_INLINE void
-erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&rq->mtx));
if (xrq != rq) {
- if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
+ if (erts_mtx_trylock(&xrq->mtx) == EBUSY) {
if (rq < xrq)
- erts_smp_mtx_lock(&xrq->mtx);
+ erts_mtx_lock(&xrq->mtx);
else {
- erts_smp_mtx_unlock(&rq->mtx);
- erts_smp_mtx_lock(&xrq->mtx);
- erts_smp_mtx_lock(&rq->mtx);
+ erts_mtx_unlock(&rq->mtx);
+ erts_mtx_lock(&xrq->mtx);
+ erts_mtx_lock(&rq->mtx);
}
}
}
-#endif
}
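
Note the subtlety in erts_xrunq_lock(): when rq has the higher address, rq->mtx is released and retaken, so anything decided under rq->mtx before the call may be stale afterwards. A hypothetical caller that revalidates its decision (pick_target_runq is illustrative):

    /* A decision made under rq->mtx must be revalidated, since
     * erts_xrunq_lock() may drop and retake rq->mtx. */
    erts_runq_lock(rq);
    for (;;) {
        ErtsRunQueue *xrq = pick_target_runq(rq); /* hypothetical helper */
        erts_xrunq_lock(rq, xrq);
        if (xrq == pick_target_runq(rq))
            break;               /* decision still valid; both locked */
        erts_xrunq_unlock(rq, xrq);    /* rq->mtx was dropped; retry */
    }
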
ERTS_GLB_INLINE void
-erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
if (xrq != rq)
- erts_smp_mtx_unlock(&xrq->mtx);
-#endif
+ erts_mtx_unlock(&xrq->mtx);
}
ERTS_GLB_INLINE void
-erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
if (rq1 == rq2)
- erts_smp_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq1->mtx);
else if (rq1 < rq2) {
- erts_smp_mtx_lock(&rq1->mtx);
- erts_smp_mtx_lock(&rq2->mtx);
+ erts_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq2->mtx);
}
else {
- erts_smp_mtx_lock(&rq2->mtx);
- erts_smp_mtx_lock(&rq1->mtx);
+ erts_mtx_lock(&rq2->mtx);
+ erts_mtx_lock(&rq1->mtx);
}
-#endif
}
ERTS_GLB_INLINE void
-erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
- erts_smp_mtx_unlock(&rq1->mtx);
+ erts_mtx_unlock(&rq1->mtx);
if (rq1 != rq2)
- erts_smp_mtx_unlock(&rq2->mtx);
-#endif
+ erts_mtx_unlock(&rq2->mtx);
}
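
Both erts_xrunq_lock() and erts_runqs_lock() rely on acquiring the two mutexes in ascending address order, which rules out the classic AB/BA deadlock. A minimal generic sketch of the discipline:

    /* Lock a pair of mutexes in a globally consistent (address) order,
     * so two threads locking the same pair can never deadlock. */
    static void lock_pair(erts_mtx_t *a, erts_mtx_t *b)
    {
        if (a == b)
            erts_mtx_lock(a);
        else if (a < b) {
            erts_mtx_lock(a);
            erts_mtx_lock(b);
        }
        else {
            erts_mtx_lock(b);
            erts_mtx_lock(a);
        }
    }
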
ERTS_GLB_INLINE ErtsMessage *
@@ -2429,7 +2523,7 @@ erts_alloc_message_heap(Process *pp,
Eterm **hpp,
ErlOffHeap **ohpp)
{
- erts_aint32_t state = pp ? erts_smp_atomic32_read_nob(&pp->state) : 0;
+ erts_aint32_t state = pp ? erts_atomic32_read_nob(&pp->state) : 0;
return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp);
}
@@ -2443,7 +2537,7 @@ erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp,
*msgpp = erts_shrink_message(*msgpp, used_hp - start_hp,
brefs, brefs_size);
else if (!(*msgpp)->data.attached) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN
& erts_proc_lc_my_proc_locks(pp));
HRelease(pp, end_hp, used_hp);
}
@@ -2505,56 +2599,34 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end)
}
#endif
-Process *erts_pid2proc_suspend(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
-#ifdef ERTS_SMP
-
-Process *erts_pid2proc_not_running(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
-Process *erts_pid2proc_nropt(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm pid,
- ErtsProcLocks pid_locks);
-extern int erts_disable_proc_not_running_opt;
+Process *erts_try_lock_sig_free_proc(Eterm pid,
+ ErtsProcLocks locks,
+ erts_aint32_t *statep);
#ifdef DEBUG
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \
+#define ERTS_ASSERT_IS_NOT_EXITING(P) \
do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
#else
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
+#define ERTS_ASSERT_IS_NOT_EXITING(P)
#endif
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
-
-#define erts_pid2proc_not_running erts_pid2proc
-#define erts_pid2proc_nropt erts_pid2proc
-
-#endif
#define ERTS_PROC_IS_EXITING(P) \
- (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state))
+ (ERTS_PSFLG_EXITING & erts_atomic32_read_acqb(&(P)->state))
/* Minimum NUMBER of processes for a small system to start */
#define ERTS_MIN_PROCESSES 1024
-#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
+#if ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
#undef ERTS_MIN_PROCESSES
#define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS
#endif
-void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-
-void erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time);
+void erts_notify_inc_runq(ErtsRunQueue *runq);
-#ifdef ERTS_SMP
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
+void erts_aux_thread_poke(void);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -2563,9 +2635,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t flags;
ERTS_THR_MEMORY_BARRIER;
- flags = erts_smp_atomic32_read_nob(&ssi->flags);
+ flags = erts_atomic32_read_nob(&ssi->flags);
if (flags & ERTS_SSI_FLG_SLEEPING) {
- flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ flags = erts_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
}
}
@@ -2573,7 +2645,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* #ifdef ERTS_SMP */
#include "erl_process_lock.h"
@@ -2583,5 +2654,5 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
void erts_halt(int code);
-extern erts_smp_atomic32_t erts_halt_progress;
+extern erts_atomic32_t erts_halt_progress;
extern int erts_halt_code;
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index d8c2eaba94..64ee483079 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -54,10 +54,10 @@
#define HASH_RANGE(PDict) ((PDict)->usedSlots)
#define MAKE_HASH(Term) \
- ((is_small(Term)) ? unsigned_val(Term) : \
+ ((is_small(Term)) ? (Uint32) unsigned_val(Term) : \
((is_atom(Term)) ? \
- (atom_tab(atom_val(Term))->slot.bucket.hvalue) : \
- make_internal_hash(Term)))
+ (Uint32) atom_val(Term) : \
+ make_internal_hash(Term, 0)))
#define PD_SZ2BYTES(Sz) (sizeof(ProcDict) + ((Sz) - 1)*sizeof(Eterm))
@@ -79,6 +79,8 @@
/* Array access macro */
#define ARRAY_GET(PDict, Index) (ASSERT((Index) < (PDict)->arraySize), \
(PDict)->data[Index])
+#define ARRAY_GET_PTR(PDict, Index) (ASSERT((Index) < (PDict)->arraySize), \
+ &(PDict)->data[Index])
#define ARRAY_PUT(PDict, Index, Val) (ASSERT((Index) < (PDict)->arraySize), \
(PDict)->data[Index] = (Val))
@@ -92,7 +94,7 @@ static void pd_hash_erase_all(Process *p);
static Eterm pd_hash_get_with_hval(Process *p, Eterm bucket, Eterm id);
static Eterm pd_hash_get_keys(Process *p, Eterm value);
static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd);
-static Eterm pd_hash_get_all(Process *p, ProcDict *pd);
+static Eterm pd_hash_get_all(Process *p, ProcDict *pd, int keep_dict);
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value);
static void shrink(Process *p, Eterm* ret);
@@ -156,7 +158,7 @@ erts_pd_set_initial_size(int size)
* Called from break handler
*/
void
-erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
+erts_dictionary_dump(fmtfn_t to, void *to_arg, ProcDict *pd)
{
unsigned int i;
#ifdef DEBUG
@@ -196,8 +198,8 @@ erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
}
void
-erts_deep_dictionary_dump(int to, void *to_arg,
- ProcDict* pd, void (*cb)(int, void *, Eterm))
+erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
+ ProcDict* pd, void (*cb)(fmtfn_t, void *, Eterm))
{
unsigned int i;
Eterm t;
@@ -237,39 +239,70 @@ erts_erase_dicts(Process *p)
/*
* Called from process_info/1,2.
*/
-Eterm erts_dictionary_copy(Process *p, ProcDict *pd)
+Eterm erts_dictionary_copy(ErtsHeapFactory *hfact, ProcDict *pd, Uint reserve_size)
{
- Eterm* hp;
- Eterm* heap_start;
- Eterm res = NIL;
- Eterm tmp, tmp2;
+ Eterm res;
unsigned int i, num;
+ Uint *sz;
+ Uint szi, rsz = reserve_size;
- if (pd == NULL) {
- return res;
- }
+ if (pd == NULL)
+ return NIL;
PD_CHECK(pd);
num = HASH_RANGE(pd);
- heap_start = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm) * pd->numElements * 2);
- for (i = 0; i < num; ++i) {
- tmp = ARRAY_GET(pd, i);
+ sz = (Uint *) erts_alloc(ERTS_ALC_T_TMP, sizeof(Uint) * pd->numElements);
+
+ for (i = 0, szi = 0; i < num; ++i) {
+ Eterm tmp = ARRAY_GET(pd, i);
if (is_boxed(tmp)) {
+ Uint size;
ASSERT(is_tuple(tmp));
- res = CONS(hp, tmp, res);
- hp += 2;
- } else if (is_list(tmp)) {
+ size = size_object(tmp) + 2;
+ sz[szi++] = size;
+ rsz += size;
+ }
+ else if (is_list(tmp)) {
while (tmp != NIL) {
- tmp2 = TCAR(tmp);
- res = CONS(hp, tmp2, res);
- hp += 2;
+ Uint size = size_object(TCAR(tmp)) + 2;
+ sz[szi++] = size;
+ rsz += size;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+
+ res = NIL;
+
+ for (i = 0, szi = 0; i < num; ++i) {
+ Eterm tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ Uint size;
+ Eterm el, *hp;
+ ASSERT(is_tuple(tmp));
+ size = sz[szi++];
+ rsz -= size;
+ hp = erts_produce_heap(hfact, size, rsz);
+ el = copy_struct(tmp, size-2, &hp, hfact->off_heap);
+ res = CONS(hp, el, res);
+ }
+ else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ Uint size = sz[szi++];
+ Eterm el, *hp;
+ rsz -= size;
+ hp = erts_produce_heap(hfact, size, rsz);
+ el = copy_struct(TCAR(tmp), size-2, &hp, hfact->off_heap);
+ res = CONS(hp, el, res);
tmp = TCDR(tmp);
}
}
}
- res = copy_object(res, p);
- erts_free(ERTS_ALC_T_TMP, (void *) heap_start);
+
+ ASSERT(rsz == reserve_size);
+
+ erts_free(ERTS_ALC_T_TMP, sz);
+
return res;
}
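
erts_dictionary_copy() is now a two-pass algorithm: the first pass measures every element (caching the sizes in a temporary array so size_object() runs once per term), the second produces heap and copies while counting the remaining reservation down. A condensed sketch of the pattern over a plain array; for brevity it measures twice instead of caching:

    /* Condensed two-pass sketch: measure, then produce exactly that
     * much heap, telling the factory how much is still to come. */
    static Eterm copy_terms_to_list(ErtsHeapFactory *hfact,
                                    Eterm *terms, Uint n, Uint reserve)
    {
        Eterm res = NIL;
        Uint i, rsz = reserve;
        for (i = 0; i < n; i++)                /* pass 1: measure */
            rsz += size_object(terms[i]) + 2;  /* +2 for the cons cell */
        for (i = 0; i < n; i++) {              /* pass 2: produce, copy */
            Uint size = size_object(terms[i]) + 2;
            Eterm el, *hp;
            rsz -= size;
            hp = erts_produce_heap(hfact, size, rsz);
            el = copy_struct(terms[i], size - 2, &hp, hfact->off_heap);
            res = CONS(hp, el, res);
        }
        return res;
    }
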
@@ -281,7 +314,7 @@ BIF_RETTYPE get_0(BIF_ALIST_0)
{
Eterm ret;
PD_CHECK(BIF_P->dictionary);
- ret = pd_hash_get_all(BIF_P, BIF_P->dictionary);
+ ret = pd_hash_get_all(BIF_P, BIF_P->dictionary, 1);
PD_CHECK(BIF_P->dictionary);
BIF_RET(ret);
}
@@ -329,7 +362,7 @@ BIF_RETTYPE erase_0(BIF_ALIST_0)
{
Eterm ret;
PD_CHECK(BIF_P->dictionary);
- ret = pd_hash_get_all(BIF_P, BIF_P->dictionary);
+ ret = pd_hash_get_all(BIF_P, BIF_P->dictionary, 0);
pd_hash_erase_all(BIF_P);
PD_CHECK(BIF_P->dictionary);
BIF_RET(ret);
@@ -408,6 +441,11 @@ static void pd_hash_erase_all(Process *p)
}
}
+Uint32 erts_pd_make_hx(Eterm key)
+{
+ return MAKE_HASH(key);
+}
+
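
erts_pd_make_hx() exposes the dictionary hash so a caller can compute it once and reuse it across probes; a hypothetical lookup:

    /* Hypothetical: hash the key once, then probe with the
     * precomputed hash instead of rehashing on every lookup. */
    Uint32 hx = erts_pd_make_hx(key);
    Eterm val = erts_pd_hash_get_with_hx(p, hx, key);
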
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id)
{
unsigned int hval;
@@ -536,29 +574,46 @@ static Eterm pd_hash_get_keys(Process *p, Eterm value)
static Eterm
-pd_hash_get_all(Process *p, ProcDict *pd)
+pd_hash_get_all(Process *p, ProcDict *pd, int keep_dict)
{
Eterm* hp;
+ Eterm* tp;
Eterm res = NIL;
Eterm tmp, tmp2;
unsigned int i;
unsigned int num;
+ Uint need;
if (pd == NULL) {
return res;
}
num = HASH_RANGE(pd);
- hp = HAlloc(p, pd->numElements * 2);
-
+
+ /*
+ * If this is not erase/0, then we must copy all key-value tuples,
+ * as they may be mutated by put/2.
+ */
+ need = pd->numElements * (keep_dict ? 2+3 : 2);
+ hp = HAlloc(p, need);
+
for (i = 0; i < num; ++i) {
tmp = ARRAY_GET(pd, i);
if (is_boxed(tmp)) {
- ASSERT(is_tuple(tmp));
+ if (keep_dict) {
+ tp = tuple_val(tmp);
+ tmp = TUPLE2(hp, tp[1], tp[2]);
+ hp += 3;
+ }
res = CONS(hp, tmp, res);
hp += 2;
} else if (is_list(tmp)) {
while (tmp != NIL) {
tmp2 = TCAR(tmp);
+ if (keep_dict) {
+ tp = tuple_val(tmp2);
+ tmp2 = TUPLE2(hp, tp[1], tp[2]);
+ hp += 3;
+ }
res = CONS(hp, tmp2, res);
hp += 2;
tmp = TCDR(tmp);
@@ -572,11 +627,14 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
{
unsigned int hval;
Eterm *hp;
+ Eterm *tp;
+ Eterm *bucket;
Eterm tpl;
Eterm old;
+ Eterm old_val = am_undefined;
Eterm tmp;
int needed;
- int i = 0;
+ int new_key = 1;
#ifdef DEBUG
Eterm *hp_limit;
#endif
@@ -590,7 +648,8 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
p->dictionary->numElements = 0;
}
hval = pd_hash_value(p->dictionary, id);
- old = ARRAY_GET(p->dictionary, hval);
+ bucket = ARRAY_GET_PTR(p->dictionary, hval);
+ old = *bucket;
/*
* Calculate the number of heap words needed and garbage
@@ -598,32 +657,49 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
*/
needed = 3; /* {Key,Value} tuple */
if (is_boxed(old)) {
- /*
- * We don't want to compare keys twice, so we'll always
- * reserve the space for two CONS cells.
- */
- needed += 2+2;
+ ASSERT(is_tuple(old));
+ tp = tuple_val(old);
+ if (EQ(tp[1], id)) {
+ old_val = tp[2];
+ if (is_immed(value)) {
+ tp[2] = value; /* DESTRUCTIVE HEAP ASSIGNMENT */
+ return old_val;
+ }
+ new_key = 0;
+ }
+ else {
+ needed += 2+2;
+ }
} else if (is_list(old)) {
- i = 0;
- for (tmp = old; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
- ++i;
- }
- if (is_nil(tmp)) {
- i = -1;
- needed += 2;
- } else {
- needed += 2*(i+1);
+ Eterm* prev_cdr = bucket;
+
+ needed += 2;
+ for (tmp = old; tmp != NIL; prev_cdr = &TCDR(tmp), tmp = *prev_cdr) {
+ tp = tuple_val(TCAR(tmp));
+ if (EQ(tp[1], id)) {
+ old_val = tp[2];
+ if (is_immed(value)) {
+ tp[2] = value; /* DESTRUCTIVE HEAP ASSIGNMENT */
+ return old_val;
+ }
+ new_key = 0;
+ /* Unlink old {Key,Value} from list */
+ *prev_cdr = TCDR(tmp); /* maybe DESTRUCTIVE HEAP ASSIGNMENT */
+ break;
+ }
}
}
if (HeapWordsLeft(p) < needed) {
Eterm root[3];
root[0] = id;
root[1] = value;
- root[2] = old;
+ root[2] = old_val;
erts_garbage_collect(p, needed, root, 3);
id = root[0];
value = root[1];
- old = root[2];
+ old_val = root[2];
+ ASSERT(bucket == ARRAY_GET_PTR(p->dictionary, hval));
+ old = *bucket;
}
#ifdef DEBUG
hp_limit = p->htop + needed;
@@ -639,66 +715,29 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
* Update the dictionary.
*/
if (is_nil(old)) {
- ARRAY_PUT(p->dictionary, hval, tpl);
- ++(p->dictionary->numElements);
+ *bucket = tpl;
} else if (is_boxed(old)) {
ASSERT(is_tuple(old));
- if (EQ(tuple_val(old)[1],id)) {
- ARRAY_PUT(p->dictionary, hval, tpl);
- return tuple_val(old)[2];
+ if (!new_key) {
+ ASSERT(EQ(tuple_val(old)[1],id));
+ *bucket = tpl;
+ return old_val;
} else {
hp = HeapOnlyAlloc(p, 4);
tmp = CONS(hp, old, NIL);
hp += 2;
- ++(p->dictionary->numElements);
- ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, tmp));
+ *bucket = CONS(hp, tpl, tmp);
hp += 2;
ASSERT(hp <= hp_limit);
}
} else if (is_list(old)) {
- if (i == -1) {
- /*
- * New key. Simply prepend the tuple to the beginning of the list.
- */
- hp = HeapOnlyAlloc(p, 2);
- ARRAY_PUT(p->dictionary, hval, CONS(hp, tpl, old));
- hp += 2;
- ASSERT(hp <= hp_limit);
- ++(p->dictionary->numElements);
- } else {
- /*
- * i = Number of CDRs to skip to reach the changed element in the list.
- *
- * Replace old value in list. To avoid pointers from the old generation
- * to the new, we must rebuild the list from the beginning up to and
- * including the changed element.
- */
- Eterm nlist;
- int j;
-
- hp = HeapOnlyAlloc(p, (i+1)*2);
-
- /* Find the list element to change. */
- for (j = 0, nlist = old; j < i; j++, nlist = TCDR(nlist)) {
- ;
- }
- ASSERT(EQ(tuple_val(TCAR(nlist))[1], id));
- nlist = TCDR(nlist); /* Unchanged part of list. */
-
- /* Rebuild list before the updated element. */
- for (tmp = old; i-- > 0; tmp = TCDR(tmp)) {
- nlist = CONS(hp, TCAR(tmp), nlist);
- hp += 2;
- }
- ASSERT(EQ(tuple_val(TCAR(tmp))[1], id));
-
- /* Put the updated element first in the new list. */
- nlist = CONS(hp, tpl, nlist);
- hp += 2;
- ASSERT(hp <= hp_limit);
- ARRAY_PUT(p->dictionary, hval, nlist);
- return tuple_val(TCAR(tmp))[2];
- }
+ /*
+ * Simply prepend the tuple to the beginning of the list.
+ */
+ hp = HeapOnlyAlloc(p, 2);
+ *bucket = CONS(hp, tpl, *bucket);
+ hp += 2;
+ ASSERT(hp <= hp_limit);
} else {
#ifdef DEBUG
erts_fprintf(stderr,
@@ -709,10 +748,13 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
erts_exit(ERTS_ERROR_EXIT, "Damaged process dictionary found during put/2.");
}
+
+ p->dictionary->numElements += new_key;
+
if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) {
grow(p);
}
- return am_undefined;
+ return old_val;
}
/*
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
index 387562058c..3ff2354f91 100644
--- a/erts/emulator/beam/erl_process_dict.h
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,12 +37,13 @@ typedef struct proc_dict {
int erts_pd_set_initial_size(int size);
Uint erts_dicts_mem_size(struct process *p);
void erts_erase_dicts(struct process *p);
-void erts_dictionary_dump(int to, void *to_arg, ProcDict *pd);
-void erts_deep_dictionary_dump(int to, void *to_arg,
- ProcDict* pd, void (*cb)(int, void *, Eterm obj));
-Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
+void erts_dictionary_dump(fmtfn_t to, void *to_arg, ProcDict *pd);
+void erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
+ ProcDict* pd, void (*cb)(fmtfn_t, void *, Eterm obj));
+Eterm erts_dictionary_copy(ErtsHeapFactory *hfact, ProcDict *pd, Uint reserve_size);
Eterm erts_pd_hash_get(struct process *p, Eterm id);
+Uint32 erts_pd_make_hx(Eterm key);
Eterm erts_pd_hash_get_with_hx(Process *p, Uint32 hx, Eterm id);
#endif
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index a70dfb8e73..ac5054ea10 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,25 +32,35 @@
#include "dist.h"
#include "beam_catches.h"
#include "erl_binary.h"
+#include "erl_map.h"
#define ERTS_WANT_EXTERNAL_TAGS
#include "external.h"
+#include "erl_proc_sig_queue.h"
#define PTR_FMT "%bpX"
#define ETERM_FMT "%beX"
#define OUR_NIL _make_header(0,_TAG_HEADER_FLOAT)
-static void dump_process_info(int to, void *to_arg, Process *p);
-static void dump_element(int to, void *to_arg, Eterm x);
-static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
-static void dump_element_nl(int to, void *to_arg, Eterm x);
-static int stack_element_dump(int to, void *to_arg, Eterm* sp,
+static void dump_process_info(fmtfn_t to, void *to_arg, Process *p);
+static void dump_element(fmtfn_t to, void *to_arg, Eterm x);
+static void dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep);
+static void dump_element_nl(fmtfn_t to, void *to_arg, Eterm x);
+static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp,
int yreg);
-static void stack_trace_dump(int to, void *to_arg, Eterm* sp);
-static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static void heap_dump(int to, void *to_arg, Eterm x);
-static void dump_binaries(int to, void *to_arg, Binary* root);
-static void dump_externally(int to, void *to_arg, Eterm term);
+static void stack_trace_dump(fmtfn_t to, void *to_arg, Eterm* sp);
+static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x);
+static void heap_dump(fmtfn_t to, void *to_arg, Eterm x);
+static void dump_binaries(fmtfn_t to, void *to_arg, Binary* root);
+void erts_print_base64(fmtfn_t to, void *to_arg,
+ byte* src, Uint size);
+static void dump_externally(fmtfn_t to, void *to_arg, Eterm term);
+static void mark_literal(Eterm* ptr);
+static void init_literal_areas(void);
+static void dump_literals(fmtfn_t to, void *to_arg);
+static void dump_persistent_terms(fmtfn_t to, void *to_arg);
+static void dump_module_literals(fmtfn_t to, void *to_arg,
+ ErtsLiteralArea* lit_area);
static Binary* all_binaries;
@@ -58,131 +68,204 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-
void
-erts_deep_process_dump(int to, void *to_arg)
+erts_deep_process_dump(fmtfn_t to, void *to_arg)
{
int i, max = erts_ptab_max(&erts_proc);
all_binaries = NULL;
-
+ init_literal_areas();
+ erts_init_persistent_dumping();
+
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC)))
- dump_process_info(to, to_arg, p);
+ erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_EXITING)
+ continue;
+ if (state & ERTS_PSFLG_GC) {
+ ErtsSchedulerData *sdp = erts_get_scheduler_data();
+ if (!sdp || p != sdp->current_process)
+ continue;
+
+ /* We want to dump the garbing process that caused the dump */
+ }
+
+ dump_process_info(to, to_arg, p);
}
}
+ dump_persistent_terms(to, to_arg);
+ dump_literals(to, to_arg);
dump_binaries(to, to_arg, all_binaries);
}
-Uint erts_process_memory(Process *p, int incl_msg_inq) {
- ErtsMessage *mp;
- Uint size = 0;
- struct saved_calls *scb;
- size += sizeof(Process);
+static void
+monitor_size(ErtsMonitor *mon, void *vsize)
+{
+ *((Uint *) vsize) += erts_monitor_size(mon);
+}
- if (incl_msg_inq)
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+static void
+link_size(ErtsMonitor *lnk, void *vsize)
+{
+ *((Uint *) vsize) += erts_link_size(lnk);
+}
- erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
- erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
- size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
- if (p->old_hend && p->old_heap)
- size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+Uint erts_process_memory(Process *p, int include_sigs_in_transit)
+{
+ Uint size = 0;
+ struct saved_calls *scb;
+
+ size += sizeof(Process);
+
+ erts_link_tree_foreach(ERTS_P_LINKS(p),
+ link_size, (void *) &size);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
+ monitor_size, (void *) &size);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
+ monitor_size, (void *) &size);
+ size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
+ if (p->old_hend && p->old_heap)
+ size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
+ if (!include_sigs_in_transit) {
+ /*
+ * Size of message queue!
+ *
+ * Note that this assumes that any part of the message
+ * queue located in the middle queue has been moved
+ * into the inner queue prior to this call.
+ * process_info() management ensures this is done.
+ */
+ ErtsMessage *mp;
+ for (mp = p->sig_qs.first; mp; mp = mp->next) {
+ ASSERT(ERTS_SIG_IS_MSG((ErtsSignal *) mp));
+ size += sizeof(ErtsMessage);
+ if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
+ }
+ }
+ else {
+ /*
+ * Size of message queue plus size of all signals
+ * in transit to the process!
+ */
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ size += sizeof(ErtsMessage);
+ if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
+ size += erts_proc_sig_signal_size((ErtsSignal *) mp);
+ else if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
+ });
+ }
- size += p->msg.len * sizeof(ErtsMessage);
+ if (p->arg_reg != p->def_arg_reg) {
+ size += p->arity * sizeof(p->arg_reg[0]);
+ }
- for (mp = p->msg.first; mp; mp = mp->next)
- if (mp->data.attached)
- size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
+ if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
+ size += sizeof(ErtsPSD);
- if (p->arg_reg != p->def_arg_reg) {
- size += p->arity * sizeof(p->arg_reg[0]);
- }
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ size += (sizeof(struct saved_calls)
+ + (scb->len-1) * sizeof(scb->ct[0]));
+ }
- if (erts_smp_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
- size += sizeof(ErtsPSD);
+ size += erts_dicts_mem_size(p);
+ return size;
+}
- scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
- if (scb) {
- size += (sizeof(struct saved_calls)
- + (scb->len-1) * sizeof(scb->ct[0]));
- }
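
erts_process_memory() now offers two accounting modes; a hypothetical caller choosing between them (the cheap mode assumes the middle signal queue was already fetched, as noted above):

    /* Hypothetical usage of the two accounting modes. */
    Uint quick = erts_process_memory(p, 0); /* inner queue only; assumes
                                             * middle queue was fetched */
    Uint exact = erts_process_memory(p, 1); /* also fetches and counts
                                             * signals in transit */
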
+static ERTS_INLINE void
+dump_msg(fmtfn_t to, void *to_arg, ErtsMessage *mp)
+{
+ if (ERTS_SIG_IS_MSG((ErtsSignal *) mp)) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg))
+ dump_element(to, to_arg, mesg);
+ else
+ dump_dist_ext(to, to_arg, mp->data.dist_ext);
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ erts_print(to, to_arg, ":");
+ dump_element(to, to_arg, mesg);
+ erts_print(to, to_arg, "\n");
+ }
+}
- size += erts_dicts_mem_size(p);
- return size;
+static ERTS_INLINE void
+heap_dump_msg(fmtfn_t to, void *to_arg, ErtsMessage *mp)
+{
+ if (ERTS_SIG_IS_MSG((ErtsSignal *) mp)) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg))
+ heap_dump(to, to_arg, mesg);
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ heap_dump(to, to_arg, mesg);
+ }
}
static void
-dump_process_info(int to, void *to_arg, Process *p)
+dump_process_info(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
- ErtsMessage* mp;
int yreg = -1;
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE)
+ return;
+
+ erts_proc_sig_fetch(p);
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
+ if (p->sig_qs.first || p->sig_qs.cont) {
erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id);
- for (mp = p->msg.first; mp != NULL; mp = mp->next) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg))
- dump_element(to, to_arg, mesg);
- else
- dump_dist_ext(to, to_arg, mp->data.dist_ext);
- mesg = ERL_MESSAGE_TOKEN(mp);
- erts_print(to, to_arg, ":");
- dump_element(to, to_arg, mesg);
- erts_print(to, to_arg, "\n");
- }
+ ERTS_FOREACH_SIG_PRIVQS(p, mp, dump_msg(to, to_arg, mp));
}
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
- if (p->dictionary) {
- erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
- erts_deep_dictionary_dump(to, to_arg,
- p->dictionary, dump_element_nl);
- }
+ if (p->dictionary) {
+ erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
+ erts_deep_dictionary_dump(to, to_arg,
+ p->dictionary, dump_element_nl);
}
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
- erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
- for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, sp, yreg);
- }
+ erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
+ }
- erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
- for (sp = p->stop; sp < STACK_START(p); sp++) {
- Eterm term = *sp;
-
- if (!is_catch(term) && !is_CP(term)) {
- heap_dump(to, to_arg, term);
- }
- }
- for (mp = p->msg.first; mp != NULL; mp = mp->next) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg))
- heap_dump(to, to_arg, mesg);
- mesg = ERL_MESSAGE_TOKEN(mp);
- heap_dump(to, to_arg, mesg);
- }
- if (p->dictionary) {
- erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump);
- }
+ erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ Eterm term = *sp;
+
+ if (!is_catch(term) && !is_CP(term)) {
+ heap_dump(to, to_arg, term);
+ }
+ }
+
+ if (p->sig_qs.first || p->sig_qs.cont)
+ ERTS_FOREACH_SIG_PRIVQS(p, mp, heap_dump_msg(to, to_arg, mp));
+
+ if (p->dictionary) {
+ erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump);
}
}
static void
-dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep)
+dump_dist_ext(fmtfn_t to, void *to_arg, ErtsDistExternal *edep)
{
if (!edep)
erts_print(to, to_arg, "D0:E0:");
else {
byte *e;
size_t sz;
+
if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB))
erts_print(to, to_arg, "D0:");
else {
@@ -200,17 +283,23 @@ dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep)
else {
ASSERT(*e == VERSION_MAGIC);
}
-
erts_print(to, to_arg, "E%X:", sz);
- if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
- erts_print(to, to_arg, "%02X", VERSION_MAGIC);
- while (e < edep->ext_endp)
- erts_print(to, to_arg, "%02X", *e++);
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
+ byte sbuf[3];
+ int i = 0;
+
+ sbuf[i++] = VERSION_MAGIC;
+ while (i < sizeof(sbuf) && e < edep->ext_endp) {
+ sbuf[i++] = *e++;
+ }
+ erts_print_base64(to, to_arg, sbuf, i);
+ }
+ erts_print_base64(to, to_arg, e, edep->ext_endp - e);
}
}
static void
-dump_element(int to, void *to_arg, Eterm x)
+dump_element(fmtfn_t to, void *to_arg, Eterm x)
{
if (is_list(x)) {
erts_print(to, to_arg, "H" PTR_FMT, list_val(x));
@@ -240,14 +329,14 @@ dump_element(int to, void *to_arg, Eterm x)
}
static void
-dump_element_nl(int to, void *to_arg, Eterm x)
+dump_element_nl(fmtfn_t to, void *to_arg, Eterm x)
{
dump_element(to, to_arg, x);
erts_putc(to, to_arg, '\n');
}
static void
-stack_trace_dump(int to, void *to_arg, Eterm *sp) {
+stack_trace_dump(fmtfn_t to, void *to_arg, Eterm *sp) {
Eterm x = *sp;
if (is_CP(x)) {
erts_print(to, to_arg, "%p:", sp);
@@ -258,7 +347,7 @@ stack_trace_dump(int to, void *to_arg, Eterm *sp) {
}
void
-erts_limited_stack_trace(int to, void *to_arg, Process *p)
+erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
@@ -304,7 +393,7 @@ erts_limited_stack_trace(int to, void *to_arg, Process *p)
}
static int
-stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
+stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -332,10 +421,10 @@ stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
}
static void
-print_function_from_pc(int to, void *to_arg, BeamInstr* x)
+print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
- BeamInstr* addr = find_function_from_pc(x);
- if (addr == NULL) {
+ ErtsCodeMFA* cmfa = find_function_from_pc(x);
+ if (cmfa == NULL) {
if (x == beam_exit) {
erts_print(to, to_arg, "<terminate process>");
} else if (x == beam_continue_exit) {
@@ -347,12 +436,13 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
} else {
erts_print(to, to_arg, "%T:%T/%bpu + %bpu",
- addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ cmfa->module, cmfa->function, cmfa->arity,
+ (x-(BeamInstr*)cmfa) * sizeof(Eterm));
}
}
static void
-heap_dump(int to, void *to_arg, Eterm x)
+heap_dump(fmtfn_t to, void *to_arg, Eterm x)
{
DeclareTmpHeapNoproc(last,1);
Eterm* next = last;
@@ -369,7 +459,9 @@ heap_dump(int to, void *to_arg, Eterm x)
next = (Eterm *) x;
} else if (is_list(x)) {
ptr = list_val(x);
- if (ptr[0] != OUR_NIL) {
+ if (erts_is_literal(x, ptr)) {
+ mark_literal(ptr);
+ } else if (ptr[0] != OUR_NIL) {
erts_print(to, to_arg, PTR_FMT ":l", ptr);
dump_element(to, to_arg, ptr[0]);
erts_putc(to, to_arg, '|');
@@ -388,7 +480,9 @@ heap_dump(int to, void *to_arg, Eterm x)
ptr = boxed_val(x);
hdr = *ptr;
- if (hdr != OUR_NIL) { /* If not visited */
+ if (erts_is_literal(x, ptr)) {
+ mark_literal(ptr);
+ } else if (hdr != OUR_NIL) {
erts_print(to, to_arg, PTR_FMT ":", ptr);
if (is_arity_value(hdr)) {
Uint i;
@@ -429,22 +523,19 @@ heap_dump(int to, void *to_arg, Eterm x)
} else if (is_binary_header(hdr)) {
Uint tag = thing_subtag(hdr);
Uint size = binary_size(x);
- Uint i;
if (tag == HEAP_BINARY_SUBTAG) {
byte* p;
erts_print(to, to_arg, "Yh%X:", size);
p = binary_bytes(x);
- for (i = 0; i < size; i++) {
- erts_print(to, to_arg, "%02X", p[i]);
- }
+ erts_print_base64(to, to_arg, p, size);
} else if (tag == REFC_BINARY_SUBTAG) {
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_smp_atomic_xchg_nob(&val->refc, 0) != 0) {
- val->flags = (UWord) all_binaries;
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
all_binaries = val;
}
erts_print(to, to_arg,
@@ -494,11 +585,77 @@ heap_dump(int to, void *to_arg, Eterm x)
erts_print(to, to_arg, "p<%beu.%beu>\n",
port_channel_no(x), port_number(x));
*ptr = OUR_NIL;
+ } else if (is_map_header(hdr)) {
+ if (is_flatmap_header(hdr)) {
+ flatmap_t* fmp = (flatmap_t *) flatmap_val(x);
+ Eterm* values = ptr + sizeof(flatmap_t) / sizeof(Eterm);
+ Uint map_size = fmp->size;
+ int i;
+
+ erts_print(to, to_arg, "Mf" ETERM_FMT ":", map_size);
+ dump_element(to, to_arg, fmp->keys);
+ erts_putc(to, to_arg, ':');
+ for (i = 0; i < map_size; i++) {
+ dump_element(to, to_arg, values[i]);
+ if (is_immed(values[i])) {
+ values[i] = make_small(0);
+ }
+ if (i < map_size-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ *ptr = OUR_NIL;
+ x = fmp->keys;
+ if (map_size) {
+ fmp->keys = (Eterm) next;
+ next = &values[map_size-1];
+ }
+ continue;
+ } else {
+ Uint i;
+ Uint sz = 0;
+ Eterm* nodes = ptr + 1;
+
+ switch (MAP_HEADER_TYPE(hdr)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ nodes++;
+ sz = 16;
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(x), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ nodes++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(x), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ erts_print(to, to_arg, "Mn" ETERM_FMT ":", sz);
+ break;
+ }
+ *ptr = OUR_NIL;
+ for (i = 0; i < sz; i++) {
+ dump_element(to, to_arg, nodes[i]);
+ if (is_immed(nodes[i])) {
+ nodes[i] = make_small(0);
+ }
+ if (i < sz-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ x = nodes[0];
+ nodes[0] = (Eterm) next;
+ next = &nodes[sz-1];
+ continue;
+ }
} else {
/*
* All other we dump in the external term format.
*/
- dump_externally(to, to_arg, x);
+ dump_externally(to, to_arg, x);
erts_putc(to, to_arg, '\n');
*ptr = OUR_NIL;
}
@@ -512,25 +669,22 @@ heap_dump(int to, void *to_arg, Eterm x)
}
static void
-dump_binaries(int to, void *to_arg, Binary* current)
+dump_binaries(fmtfn_t to, void *to_arg, Binary* current)
{
while (current) {
- long i;
- long size = current->orig_size;
+ SWord size = current->orig_size;
byte* bytes = (byte*) current->orig_bytes;
erts_print(to, to_arg, "=binary:" PTR_FMT "\n", current);
erts_print(to, to_arg, "%X:", size);
- for (i = 0; i < size; i++) {
- erts_print(to, to_arg, "%02X", bytes[i]);
- }
- erts_putc(to, to_arg, '\n');
- current = (Binary *) current->flags;
+ erts_print_base64(to, to_arg, bytes, size);
+ erts_putc(to, to_arg, '\n');
+ current = (Binary *) current->intern.flags;
}
}
static void
-dump_externally(int to, void *to_arg, Eterm term)
+dump_externally(fmtfn_t to, void *to_arg, Eterm term)
{
byte sbuf[1024]; /* encode and hope for the best ... */
byte* s;
@@ -560,20 +714,324 @@ dump_externally(int to, void *to_arg, Eterm term)
}
}
- /* Do not handle maps */
- if (is_map(term)) {
- term = am_undefined;
- }
-
s = p = sbuf;
erts_encode_ext(term, &p);
erts_print(to, to_arg, "E%X:", p-s);
- while (s < p) {
- erts_print(to, to_arg, "%02X", *s++);
+ erts_print_base64(to, to_arg, sbuf, p-s);
+}
+
+/*
+ * Handle dumping of literal areas.
+ */
+
+static ErtsLiteralArea** lit_areas;
+static Uint num_lit_areas;
+
+static int compare_areas(const void * a, const void * b)
+{
+ ErtsLiteralArea** a_p = (ErtsLiteralArea **) a;
+ ErtsLiteralArea** b_p = (ErtsLiteralArea **) b;
+
+ if (*a_p < *b_p) {
+ return -1;
+ } else if (*b_p < *a_p) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+static void
+init_literal_areas(void)
+{
+ int i;
+ Module* modp;
+ ErtsCodeIndex code_ix;
+ ErtsLiteralArea** area_p;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
+
+ lit_areas = area_p = erts_dump_lit_areas;
+ num_lit_areas = 0;
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ modp = module_code(i, code_ix);
+ if (modp == NULL) {
+ continue;
+ }
+ if (modp->curr.code_length > 0 &&
+ modp->curr.code_hdr->literal_area) {
+ *area_p++ = modp->curr.code_hdr->literal_area;
+ }
+ if (modp->old.code_length > 0 && modp->old.code_hdr->literal_area) {
+ *area_p++ = modp->old.code_hdr->literal_area;
+ }
+ }
+
+ num_lit_areas = area_p - lit_areas;
+ ASSERT(num_lit_areas <= erts_dump_num_lit_areas);
+ for (i = 0; i < num_lit_areas; i++) {
+ lit_areas[i]->off_heap = 0;
+ }
+
+ qsort(lit_areas, num_lit_areas, sizeof(ErtsLiteralArea *),
+ compare_areas);
+
+ qsort(erts_persistent_areas, erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea *), compare_areas);
+
+ erts_runlock_old_code(code_ix);
+}
+
+static int search_areas(const void * a, const void * b) {
+ Eterm* key = (Eterm *) a;
+ ErtsLiteralArea** b_p = (ErtsLiteralArea **) b;
+ if (key < b_p[0]->start) {
+ return -1;
+ } else if (b_p[0]->end <= key) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void mark_literal(Eterm* ptr)
+{
+ ErtsLiteralArea** ap;
+
+ ap = bsearch(ptr, lit_areas, num_lit_areas, sizeof(ErtsLiteralArea*),
+ search_areas);
+ if (ap == 0) {
+ ap = bsearch(ptr, erts_persistent_areas,
+ erts_num_persistent_areas,
+ sizeof(ErtsLiteralArea*),
+ search_areas);
+ }
+
+
+ /*
+ * If the literal was created by native code, this search will not
+ * find it and ap will be NULL.
+ */
+
+ if (ap) {
+ ap[0]->off_heap = (struct erl_off_heap_header *) 1;
+ }
+}
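
Literal areas are sorted by start address so a heap pointer can be classified with bsearch() over [start, end) ranges; the comparator treats a key inside a range as equal to it. A self-contained sketch of the trick with a simplified range type:

    #include <stdlib.h>

    typedef struct { char *start; char *end; } Range;

    /* bsearch() comparator: a key inside [start, end) compares
     * equal to the range containing it. */
    static int cmp_key_range(const void *key, const void *elem)
    {
        const Range *r = (const Range *) elem;
        if ((const char *) key < r->start) return -1;
        if ((const char *) key >= r->end) return 1;
        return 0;
    }

    /* ranges[] must be sorted by start address and non-overlapping,
     * just as init_literal_areas() arranges with qsort(). */
    static Range *find_range(const void *p, Range *ranges, size_t n)
    {
        return bsearch(p, ranges, n, sizeof(Range), cmp_key_range);
    }
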
+
+static void
+dump_literals(fmtfn_t to, void *to_arg)
+{
+ ErtsCodeIndex code_ix;
+ int i;
+ Uint idx;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
+
+ erts_print(to, to_arg, "=literals\n");
+ for (i = 0; i < num_lit_areas; i++) {
+ if (lit_areas[i]->off_heap) {
+ dump_module_literals(to, to_arg, lit_areas[i]);
+ }
+ }
+
+ erts_runlock_old_code(code_ix);
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ dump_module_literals(to, to_arg, erts_persistent_areas[idx]);
+ }
+}
+
+static void
+dump_persistent_terms(fmtfn_t to, void *to_arg)
+{
+ Uint idx;
+
+ erts_print(to, to_arg, "=persistent_terms\n");
+
+ for (idx = 0; idx < erts_num_persistent_areas; idx++) {
+ ErtsLiteralArea* ap = erts_persistent_areas[idx];
+ Eterm tuple = make_tuple(ap->start);
+ Eterm* tup_val = tuple_val(tuple);
+
+ dump_element(to, to_arg, tup_val[1]);
+ erts_putc(to, to_arg, '|');
+ dump_element_nl(to, to_arg, tup_val[2]);
+ }
+}
+
+static void
+dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
+{
+ Eterm* htop;
+ Eterm* hend;
+
+ htop = lit_area->start;
+ hend = lit_area->end;
+ while (htop < hend) {
+ Eterm w = *htop;
+ Eterm term;
+ Uint size;
+
+ switch (primary_tag(w)) {
+ case TAG_PRIMARY_HEADER:
+ term = make_boxed(htop);
+ erts_print(to, to_arg, PTR_FMT ":", htop);
+ if (is_arity_value(w)) {
+ Uint i;
+ Uint arity = arityval(w);
+
+ erts_print(to, to_arg, "t" ETERM_FMT ":", arity);
+ for (i = 1; i <= arity; i++) {
+ dump_element(to, to_arg, htop[i]);
+ if (i < arity) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ } else if (w == HEADER_FLONUM) {
+ FloatDef f;
+ char sbuf[31];
+ int i;
+
+ GET_DOUBLE_DATA((htop+1), f);
+ i = sys_double_to_chars(f.fd, sbuf, sizeof(sbuf));
+ sys_memset(sbuf+i, 0, 31-i);
+ erts_print(to, to_arg, "F%X:%s\n", i, sbuf);
+ } else if (_is_bignum_header(w)) {
+ erts_print(to, to_arg, "B%T\n", term);
+ } else if (is_binary_header(w)) {
+ Uint tag = thing_subtag(w);
+ Uint size = binary_size(term);
+
+ if (tag == HEAP_BINARY_SUBTAG) {
+ byte* p;
+
+ erts_print(to, to_arg, "Yh%X:", size);
+ p = binary_bytes(term);
+ erts_print_base64(to, to_arg, p, size);
+ } else if (tag == REFC_BINARY_SUBTAG) {
+ ProcBin* pb = (ProcBin *) binary_val(term);
+ Binary* val = pb->val;
+
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
+ all_binaries = val;
+ }
+ erts_print(to, to_arg,
+ "Yc" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val,
+ pb->bytes - (byte *)val->orig_bytes,
+ size);
+ } else if (tag == SUB_BINARY_SUBTAG) {
+ ErlSubBin* Sb = (ErlSubBin *) binary_val(term);
+ Eterm* real_bin;
+ void* val;
+
+ real_bin = boxed_val(Sb->orig);
+ if (thing_subtag(*real_bin) == REFC_BINARY_SUBTAG) {
+ /*
+ * Unvisited REFC_BINARY: Point directly to
+ * the binary.
+ */
+ ProcBin* pb = (ProcBin *) real_bin;
+ val = pb->val;
+ } else {
+ /*
+ * Heap binary or visited REFC binary: Point
+ * to heap binary or ProcBin on the heap.
+ */
+ val = real_bin;
+ }
+ erts_print(to, to_arg,
+ "Ys" PTR_FMT ":" PTR_FMT ":" PTR_FMT,
+ val, Sb->offs, size);
+ }
+ erts_putc(to, to_arg, '\n');
+ } else if (is_map_header(w)) {
+ if (is_flatmap_header(w)) {
+ flatmap_t* fmp = (flatmap_t *) flatmap_val(term);
+ Eterm* values = htop + sizeof(flatmap_t) / sizeof(Eterm);
+ Uint map_size = fmp->size;
+ int i;
+
+ erts_print(to, to_arg, "Mf" ETERM_FMT ":", map_size);
+ dump_element(to, to_arg, fmp->keys);
+ erts_putc(to, to_arg, ':');
+ for (i = 0; i < map_size; i++) {
+ dump_element(to, to_arg, values[i]);
+ if (i < map_size-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ } else {
+ Uint i;
+ Uint sz = 0;
+ Eterm* nodes = htop + 1;
+
+ switch (MAP_HEADER_TYPE(w)) {
+ case MAP_HEADER_TAG_HAMT_HEAD_ARRAY:
+ nodes++;
+ sz = 16;
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(term), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_HEAD_BITMAP:
+ nodes++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(w));
+ erts_print(to, to_arg, "Mh" ETERM_FMT ":" ETERM_FMT ":",
+ hashmap_size(term), sz);
+ break;
+ case MAP_HEADER_TAG_HAMT_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(w));
+ erts_print(to, to_arg, "Mn" ETERM_FMT ":", sz);
+ break;
+ }
+ for (i = 0; i < sz; i++) {
+ dump_element(to, to_arg, nodes[i]);
+ if (i < sz-1) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ }
+ } else {
+ /* Dump everything else in the external format */
+ dump_externally(to, to_arg, term);
+ erts_putc(to, to_arg, '\n');
+ }
+ size = 1 + header_arity(w);
+ switch (w & _HEADER_SUBTAG_MASK) {
+ case MAP_SUBTAG:
+ if (is_flatmap_header(w)) {
+ size += 1 + flatmap_get_size(htop);
+ } else {
+ size += hashmap_bitcount(MAP_HEADER_VAL(w));
+ }
+ break;
+ case SUB_BINARY_SUBTAG:
+ size += 1;
+ break;
+ }
+ break;
+ default:
+ ASSERT(!is_header(htop[1]));
+ erts_print(to, to_arg, PTR_FMT ":l", htop);
+ dump_element(to, to_arg, htop[0]);
+ erts_putc(to, to_arg, '|');
+ dump_element(to, to_arg, htop[1]);
+ erts_putc(to, to_arg, '\n');
+ size = 2;
+ break;
+ }
+ htop += size;
}
}
-void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg)
+void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg)
{
char *s;
switch (erts_process_state2status(psflg)) {
@@ -591,7 +1049,7 @@ void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg)
}
void
-erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
+erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg) {
int i;
@@ -637,8 +1095,8 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
erts_print(to, to_arg, "FREE"); break;
case ERTS_PSFLG_EXITING:
erts_print(to, to_arg, "EXITING"); break;
- case ERTS_PSFLG_PENDING_EXIT:
- erts_print(to, to_arg, "PENDING_EXIT"); break;
+ case ERTS_PSFLG_UNUSED:
+ erts_print(to, to_arg, "UNUSED"); break;
case ERTS_PSFLG_ACTIVE:
erts_print(to, to_arg, "ACTIVE"); break;
case ERTS_PSFLG_IN_RUNQ:
@@ -649,10 +1107,10 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
erts_print(to, to_arg, "SUSPENDED"); break;
case ERTS_PSFLG_GC:
erts_print(to, to_arg, "GC"); break;
- case ERTS_PSFLG_BOUND:
- erts_print(to, to_arg, "BOUND"); break;
- case ERTS_PSFLG_TRAP_EXIT:
- erts_print(to, to_arg, "TRAP_EXIT"); break;
+ case ERTS_PSFLG_SYS_TASKS:
+ erts_print(to, to_arg, "SYS_TASKS"); break;
+ case ERTS_PSFLG_SIG_IN_Q:
+ erts_print(to, to_arg, "SIG_IN_Q"); break;
case ERTS_PSFLG_ACTIVE_SYS:
erts_print(to, to_arg, "ACTIVE_SYS"); break;
case ERTS_PSFLG_RUNNING_SYS:
@@ -663,8 +1121,8 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
erts_print(to, to_arg, "DELAYED_SYS"); break;
case ERTS_PSFLG_OFF_HEAP_MSGQ:
erts_print(to, to_arg, "OFF_HEAP_MSGQ"); break;
- case ERTS_PSFLG_ON_HEAP_MSGQ:
- erts_print(to, to_arg, "ON_HEAP_MSGQ"); break;
+ case ERTS_PSFLG_SIG_Q:
+ erts_print(to, to_arg, "SIG_Q"); break;
case ERTS_PSFLG_DIRTY_CPU_PROC:
erts_print(to, to_arg, "DIRTY_CPU_PROC"); break;
case ERTS_PSFLG_DIRTY_IO_PROC:
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index a69185bc5c..44c7892040 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,9 +56,9 @@
* Note that wait flags may be read without the pix lock, but
* it is important that wait flags only are modified when the pix
* lock is held.
- * This implementation assumes that erts_smp_atomic_or_retold()
+ * This implementation assumes that erts_atomic_or_retold()
 * provides necessary memory barriers for a lock operation, and that
- * erts_smp_atomic_and_retold() provides necessary memorybarriers
+ * erts_atomic_and_retold() provides necessary memory barriers
* for an unlock operation.
*/
@@ -69,7 +69,6 @@
#include "erl_process.h"
#include "erl_thr_progress.h"
-#ifdef ERTS_SMP
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -102,7 +101,6 @@ static void cleanup_tse(void);
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
Sint16 proc_lock_main;
- Sint16 proc_lock_link;
Sint16 proc_lock_msgq;
Sint16 proc_lock_btm;
Sint16 proc_lock_status;
@@ -112,21 +110,13 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-#ifdef ERTS_ENABLE_LOCK_COUNT
-static void lcnt_enable_proc_lock_count(Process *proc, int enable);
-#endif
-
void
erts_init_proc_lock(int cpus)
{
int i;
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i), 1);
-#else
- erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
}
#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
@@ -149,7 +139,6 @@ erts_init_proc_lock(int cpus)
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
- lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
@@ -472,7 +461,7 @@ wait_for_locks(Process *p,
}
/*
- * erts_proc_lock_failed() is called when erts_smp_proc_lock()
+ * erts_proc_lock_failed() is called when erts_proc_lock()
* wasn't able to lock all locks. We may need to transfer locks
* to waiters and wait for our turn on locks.
*
@@ -551,7 +540,7 @@ erts_proc_lock_failed(Process *p,
}
/*
- * erts_proc_unlock_failed() is called when erts_smp_proc_unlock()
+ * erts_proc_unlock_failed() is called when erts_proc_unlock()
* wasn't able to unlock all locks. We may need to transfer locks
* to waiters.
*/
@@ -717,7 +706,7 @@ proc_safelock(int is_managed,
refc1 = 1;
erts_proc_inc_refc(p1);
}
- erts_smp_proc_unlock(p1, unlock_locks);
+ erts_proc_unlock(p1, unlock_locks);
}
unlock_locks = unlock_mask & have_locks2;
if (unlock_locks) {
@@ -727,7 +716,7 @@ proc_safelock(int is_managed,
refc2 = 1;
erts_proc_inc_refc(p2);
}
- erts_smp_proc_unlock(p2, unlock_locks);
+ erts_proc_unlock(p2, unlock_locks);
}
}
@@ -758,7 +747,7 @@ proc_safelock(int is_managed,
if (need_locks2 & lock)
lock_no--;
locks = need_locks1 & lock_mask;
- erts_smp_proc_lock(p1, locks);
+ erts_proc_lock(p1, locks);
have_locks1 |= locks;
need_locks1 &= ~locks;
}
@@ -769,7 +758,7 @@ proc_safelock(int is_managed,
lock = (1 << ++lock_no);
}
locks = need_locks2 & lock_mask;
- erts_smp_proc_lock(p2, locks);
+ erts_proc_lock(p2, locks);
have_locks2 |= locks;
need_locks2 &= ~locks;
}
@@ -906,7 +895,7 @@ erts_pid2proc_opt(Process *c_p,
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
{
/* Try a quick trylock to grab all the locks we need. */
- busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
+ busy = (int) erts_proc_raw_trylock__(proc, need_locks);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__);
@@ -944,7 +933,7 @@ erts_pid2proc_opt(Process *c_p,
erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+ erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
@@ -984,7 +973,7 @@ erts_pid2proc_opt(Process *c_p,
: (proc
!= (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {
- erts_smp_proc_unlock(proc, need_locks);
+ erts_proc_unlock(proc, need_locks);
if (flags & ERTS_P2P_FLG_INC_REFC)
dec_refc_proc = proc;
@@ -1006,6 +995,37 @@ erts_pid2proc_opt(Process *c_p,
return proc;
}
+static ERTS_INLINE
+Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
+{
+ Process *proc;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ proc = erts_proc_lookup_raw(pid);
+ if (proc) {
+ if (!allow_exit && ERTS_PROC_IS_EXITING(proc))
+ proc = NULL;
+ else
+ erts_proc_inc_refc(proc);
+ }
+
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ return proc;
+}
+
+Process *erts_proc_lookup_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 0);
+}
+
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid)
+{
+ return proc_lookup_inc_refc(pid, 1);
+}
+
void
erts_proc_lock_init(Process *p)
{
@@ -1015,7 +1035,7 @@ erts_proc_lock_init(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic32_init_nob(&p->lock.flags,
+ erts_atomic32_init_nob(&p->lock.flags,
(erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
@@ -1027,39 +1047,32 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
-#ifdef ERTS_ENABLE_LOCK_COUNT
- int do_lock_count = 1;
-#else
- int do_lock_count = 0;
-#endif
-
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id, do_lock_count);
- ethr_mutex_lock(&p->lock.link.mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &p->lock.link.lc);
-#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.btm.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
- do_lock_count);
+ erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
- erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id, do_lock_count);
+ erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.trace.lc);
@@ -1067,7 +1080,7 @@ erts_proc_lock_init(Process *p)
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
+ erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_init(p);
@@ -1081,13 +1094,12 @@ erts_proc_lock_fin(Process *p)
{
#if ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_destroy(&p->lock.main);
- erts_mtx_destroy(&p->lock.link);
erts_mtx_destroy(&p->lock.msgq);
erts_mtx_destroy(&p->lock.btm);
erts_mtx_destroy(&p->lock.status);
erts_mtx_destroy(&p->lock.trace);
#endif
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+#if defined(ERTS_ENABLE_LOCK_COUNT)
erts_lcnt_proc_lock_destroy(p);
#endif
}
@@ -1096,117 +1108,68 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int ix, max = erts_ptab_max(&erts_proc);
- Process *proc = NULL;
- for (ix = 0; ix < max; ++ix) {
- if ((proc = erts_pix2proc(ix)) != NULL)
- lcnt_enable_proc_lock_count(proc, enable);
- } /* for all processes */
-}
-
void erts_lcnt_proc_lock_init(Process *p) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
- } else { /* now the common case */
- Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
- } /* the lock names should really be aligned to four characters */
+ erts_lcnt_init_ref(&p->lock.lcnt_carrier);
+
+ if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
+ erts_lcnt_enable_proc_lock_count(p, 1);
+ }
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
- erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
+ erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
-static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
- if (enable) {
- if (!ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_init(proc);
- }
- }
- else {
- if (ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_destroy(proc);
- }
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
+ if(proc->common.id == ERTS_INVALID_PID) {
+ /* Locks without an id are more trouble than they're worth; there's no
+ * way to look them up and we can't track them with _STATIC since it's
+ * too early to tell whether we're a system process (proc->static_flags
+ * hasn't been set yet). */
+ } else if(!enable) {
+ erts_lcnt_proc_lock_destroy(proc);
+ } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+
+ carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);
+
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
+ "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
+ "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
+ "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
+ "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
+ "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+
+ erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
}
}
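
Per-process lock counting is now toggled by installing or uninstalling a carrier holding all five lock-info slots; a hypothetical runtime toggle built on the function above:

    /* Hypothetical: flip lock counting for one process at runtime.
     * Enabling installs a fresh carrier; disabling uninstalls it. */
    static void toggle_proc_lock_count(Process *p)
    {
        int on = erts_lcnt_check_ref_installed(&p->lock.lcnt_carrier);
        erts_lcnt_enable_proc_lock_count(p, !on);
    }
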
-void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
-}
+void erts_lcnt_update_process_locks(int enable) {
+ int i, max;
-void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
- char *file, unsigned int line) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
- }
- if (locks & ERTS_PROC_LOCK_BTM) {
- erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
- }
- if (locks & ERTS_PROC_LOCK_TRACE) {
- erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
- }
-}
+ max = erts_ptab_max(&erts_proc);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
-}
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Process *proc;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ proc = erts_pix2proc(i);
+
+ if(proc != NULL) {
+ erts_lcnt_enable_proc_lock_count(proc, enable);
+ }
-void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
+ }
}
-void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
-}
+
#endif /* ERTS_ENABLE_LOCK_COUNT */
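
The per-lock lcnt fields above give way to a single lcnt_carrier reference. The
following is a minimal sketch of the resulting lifecycle, relying only on the
internal erl_lock_count.h API visible in this diff; example_lifecycle is a
hypothetical helper, not part of the patch:

/* Sketch only: assumes the internal ERTS lock-count carrier API shown
 * in this diff; example_lifecycle is an illustrative name. */
static void example_lifecycle(Process *p) {
    /* Creation: the reference starts empty; counting is only
     * installed when the "process" lock category is enabled. */
    erts_lcnt_init_ref(&p->lock.lcnt_carrier);

    if (erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS))
        erts_lcnt_enable_proc_lock_count(p, 1);

    /* Destruction: uninstalling detaches the carrier, which is
     * released once it is safe with respect to concurrent readers. */
    erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}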
@@ -1221,15 +1184,11 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_lock_x(&lck,file,line);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_lock_x(&lck,file,line);
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_lock_x(&lck,file,line);
@@ -1254,15 +1213,11 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_trylock_x(locked, &lck, file, line);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_trylock_x(locked, &lck, file, line);
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1286,7 +1241,7 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unlock(&lck);
@@ -1303,10 +1258,6 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_msgq;
erts_lc_unlock(&lck);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_unlock(&lck);
- }
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_unlock(&lck);
@@ -1321,7 +1272,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_might_unlock(&lck);
@@ -1338,10 +1289,6 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_msgq;
erts_lc_might_unlock(&lck);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_might_unlock(&lck);
- }
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_might_unlock(&lck);
@@ -1349,8 +1296,6 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_might_unlock(&p->lock.main.lc);
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_might_unlock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_might_unlock(&p->lock.msgq.lc);
if (locks & ERTS_PROC_LOCK_BTM)
@@ -1369,15 +1314,11 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_require_lock(&lck, file, line);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_require_lock(&lck, file, line);
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_require_lock(&lck, file, line);
@@ -1397,8 +1338,6 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_require_lock(&p->lock.main.lc, file, line);
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_require_lock(&p->lock.msgq.lc, file, line);
if (locks & ERTS_PROC_LOCK_BTM)
@@ -1416,7 +1355,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unrequire_lock(&lck);
@@ -1433,10 +1372,6 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_msgq;
erts_lc_unrequire_lock(&lck);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- lck.id = lc_id.proc_lock_link;
- erts_lc_unrequire_lock(&lck);
- }
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_unrequire_lock(&lck);
@@ -1444,8 +1379,6 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_unrequire_lock(&p->lock.main.lc);
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_lc_unrequire_lock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_unrequire_lock(&p->lock.msgq.lc);
if (locks & ERTS_PROC_LOCK_BTM)
@@ -1465,12 +1398,10 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
lck.id = lc_id.proc_lock_main;
- else if (locks & ERTS_PROC_LOCK_LINK)
- lck.id = lc_id.proc_lock_link;
else if (locks & ERTS_PROC_LOCK_MSGQ)
lck.id = lc_id.proc_lock_msgq;
else if (locks & ERTS_PROC_LOCK_BTM)
@@ -1496,7 +1427,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
@@ -1513,10 +1444,6 @@ void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_main;
have_locks[have_locks_len++].extra = p->common.id;
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->common.id;
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
have_locks[have_locks_len++].extra = p->common.id;
@@ -1537,8 +1464,6 @@ void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
erts_lc_lock_t have_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
- if (locks & ERTS_PROC_LOCK_LINK)
- have_locks[have_locks_len++] = p->lock.link.lc;
if (locks & ERTS_PROC_LOCK_MSGQ)
have_locks[have_locks_len++] = p->lock.msgq.lc;
if (locks & ERTS_PROC_LOCK_BTM)
@@ -1566,10 +1491,6 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_main;
have_locks[have_locks_len++].extra = p->common.id;
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->common.id;
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
have_locks[have_locks_len++].extra = p->common.id;
@@ -1590,8 +1511,6 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
erts_lc_lock_t have_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
- if (locks & ERTS_PROC_LOCK_LINK)
- have_locks[have_locks_len++] = p->lock.link.lc;
if (locks & ERTS_PROC_LOCK_MSGQ)
have_locks[have_locks_len++] = p->lock.msgq.lc;
if (locks & ERTS_PROC_LOCK_BTM)
@@ -1629,14 +1548,6 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->common.id;
- }
- else {
- have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
- have_not_locks[have_not_locks_len++].extra = p->common.id;
- }
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
have_locks[have_locks_len++].extra = p->common.id;
@@ -1677,10 +1588,6 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.main.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.main.lc;
- if (locks & ERTS_PROC_LOCK_LINK)
- have_locks[have_locks_len++] = p->lock.link.lc;
- else
- have_not_locks[have_not_locks_len++] = p->lock.link.lc;
if (locks & ERTS_PROC_LOCK_MSGQ)
have_locks[have_locks_len++] = p->lock.msgq.lc;
else
@@ -1706,48 +1613,42 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
- int resv[6];
+ int resv[5];
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
- ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
+ erts_lc_lock_t locks[5] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK)};
+ ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t locks[6] = {p->lock.main.lc,
- p->lock.link.lc,
+ erts_lc_lock_t locks[5] = {p->lock.main.lc,
p->lock.msgq.lc,
p->lock.btm.lc,
p->lock.status.lc,
p->lock.trace.lc};
#endif
- erts_lc_have_locks(resv, locks, 6);
+ erts_lc_have_locks(resv, locks, 5);
if (resv[0])
res |= ERTS_PROC_LOCK_MAIN;
if (resv[1])
- res |= ERTS_PROC_LOCK_LINK;
- if (resv[2])
res |= ERTS_PROC_LOCK_MSGQ;
- if (resv[3])
+ if (resv[2])
res |= ERTS_PROC_LOCK_BTM;
- if (resv[4])
+ if (resv[3])
res |= ERTS_PROC_LOCK_STATUS;
- if (resv[5])
+ if (resv[4])
res |= ERTS_PROC_LOCK_TRACE;
return res;
@@ -1756,15 +1657,14 @@ erts_proc_lc_my_proc_locks(Process *p)
void
erts_proc_lc_chk_no_proc_locks(char *file, int line)
{
- int resv[6];
- int ids[6] = {lc_id.proc_lock_main,
- lc_id.proc_lock_link,
+ int resv[5];
+ int ids[5] = {lc_id.proc_lock_main,
lc_id.proc_lock_msgq,
lc_id.proc_lock_btm,
lc_id.proc_lock_status,
lc_id.proc_lock_trace};
- erts_lc_have_lock_ids(resv, ids, 6);
- if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4] || resv[5])) {
+ erts_lc_have_lock_ids(resv, ids, 5);
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
@@ -1806,4 +1706,3 @@ check_queue(erts_proc_lock_t *lck)
}
#endif
-#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 2cccf0697a..bd38eca4dc 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,7 +36,7 @@
#include "erl_lock_count.h"
#endif
-#include "erl_smp.h"
+#include "erl_threads.h"
#if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS)
# define ERTS_PROC_LOCK_OWN_IMPL 0
@@ -66,29 +66,33 @@
#endif
-#define ERTS_PROC_LOCK_MAX_BIT 5
+#define ERTS_PROC_LOCK_MAX_BIT 4
typedef erts_aint32_t ErtsProcLocks;
typedef struct erts_proc_lock_t_ {
#if ERTS_PROC_LOCK_OWN_IMPL
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic32_t flags;
+ erts_atomic32_t flags;
#else
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt_main;
- erts_lcnt_lock_t lcnt_link;
- erts_lcnt_lock_t lcnt_msgq;
- erts_lcnt_lock_t lcnt_btm;
- erts_lcnt_lock_t lcnt_status;
- erts_lcnt_lock_t lcnt_trace;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && !ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ /* Each erts_mtx_t has its own lock counter ^ */
+
+ #define ERTS_LCNT_PROCLOCK_IDX_MAIN 0
+ #define ERTS_LCNT_PROCLOCK_IDX_MSGQ 1
+ #define ERTS_LCNT_PROCLOCK_IDX_BTM 2
+ #define ERTS_LCNT_PROCLOCK_IDX_STATUS 3
+ #define ERTS_LCNT_PROCLOCK_IDX_TRACE 4
+
+ #define ERTS_LCNT_PROCLOCK_COUNT 5
+
+ erts_lcnt_ref_t lcnt_carrier;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
- erts_mtx_t link;
erts_mtx_t msgq;
erts_mtx_t btm;
erts_mtx_t status;
@@ -97,7 +101,7 @@ typedef struct erts_proc_lock_t_ {
# error "no implementation"
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
- erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+ erts_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
#endif
} erts_proc_lock_t;
@@ -112,27 +116,18 @@ typedef struct erts_proc_lock_t_ {
#define ERTS_PROC_LOCK_MAIN (((ErtsProcLocks) 1) << 0)
/*
- * Link lock:
- * Protects the following fields in the process structure:
- * * nlinks
- * * monitors
- * * suspend_monitors
- */
-#define ERTS_PROC_LOCK_LINK (((ErtsProcLocks) 1) << 1)
-
-/*
* Message queue lock:
* Protects the following fields in the process structure:
* * msg_inq
*/
-#define ERTS_PROC_LOCK_MSGQ (((ErtsProcLocks) 1) << 2)
+#define ERTS_PROC_LOCK_MSGQ (((ErtsProcLocks) 1) << 1)
/*
* Bif timer lock:
* Protects the following fields in the process structure:
* * bif_timers
*/
-#define ERTS_PROC_LOCK_BTM (((ErtsProcLocks) 1) << 3)
+#define ERTS_PROC_LOCK_BTM (((ErtsProcLocks) 1) << 2)
/*
* Status lock:
@@ -142,7 +137,7 @@ typedef struct erts_proc_lock_t_ {
* * sys_tasks
* * ...
*/
-#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << 4)
+#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << 3)
/*
* Trace message lock:
@@ -219,6 +214,10 @@ typedef struct erts_proc_lock_t_ {
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
& ~ERTS_PROC_LOCK_MAIN)
+/* All locks we must first unlock in order to lock L */
+#define ERTS_PROC_LOCKS_HIGHER_THAN(L) \
+ (ERTS_PROC_LOCKS_ALL & (~(L) & ~((L)-1)))
+
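
Since the lock bits are ordered (MAIN < MSGQ < BTM < STATUS < TRACE), the bit
arithmetic keeps exactly the bits above L: (L)-1 is every bit below L, so
~(L) & ~((L)-1) clears L and everything beneath it. A standalone worked
example under the new bit layout (illustration only, not part of the patch):

/* Standalone illustration of ERTS_PROC_LOCKS_HIGHER_THAN using the
 * new bit layout from this patch. */
#include <assert.h>

enum { MAIN = 1 << 0, MSGQ = 1 << 1, BTM = 1 << 2,
       STATUS = 1 << 3, TRACE = 1 << 4,
       ALL = MAIN | MSGQ | BTM | STATUS | TRACE };

#define HIGHER_THAN(L) (ALL & (~(L) & ~((L) - 1)))

int main(void) {
    /* ~MSGQ & ~(MSGQ - 1) clears MSGQ and every bit below it. */
    assert(HIGHER_THAN(MSGQ) == (BTM | STATUS | TRACE));
    assert(HIGHER_THAN(TRACE) == 0); /* nothing above the top lock */
    return 0;
}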
#define ERTS_PIX_LOCKS_BITS 10
#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
@@ -233,32 +232,173 @@ typedef struct erts_proc_lock_t_ {
/* Lock counter implementation */
#ifdef ERTS_ENABLE_LOCK_POSITION
-#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
-#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
+#define erts_proc_lock__(P,I,L) erts_proc_lock_x__(P,I,L,__FILE__,__LINE__)
+#define erts_proc_lock(P,L) erts_proc_lock_x(P,L,__FILE__,__LINE__)
#endif
-#if defined(ERTS_SMP) && defined (ERTS_ENABLE_LOCK_COUNT)
+#if defined (ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p);
void erts_lcnt_proc_lock_destroy(Process *p);
+
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
-void erts_lcnt_enable_proc_lock_count(int enable);
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable);
+void erts_lcnt_update_process_locks(int enable);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
+ char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, file, line);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, res);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, res);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, res);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_ENABLE_LOCK_COUNT*/
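
All five inline probes above share one pattern: open the carrier reference,
update only the requested lock indices, then close the reference. When
counting is not installed the open fails and the probe is a near no-op. A
condensed sketch (probe_sketch is a hypothetical name; only the MAIN index is
shown):

/* Condensed form of the probes above, assuming the carrier API from
 * this diff; probe_sketch is illustrative only. */
static void probe_sketch(erts_proc_lock_t *lock) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if (erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
        erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
        erts_lcnt_close_ref(handle, carrier);
    }
    /* else: lock counting is not installed for this process. */
}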
/* --- Process lock checking ----------------------------------------------- */
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-#define ERTS_SMP_CHK_NO_PROC_LOCKS \
+#if defined(ERTS_ENABLE_LOCK_CHECK)
+#define ERTS_CHK_NO_PROC_LOCKS \
erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
-#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
+#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
erts_proc_lc_chk_only_proc_main((P))
void erts_proc_lc_lock(Process *p, ErtsProcLocks locks,
char *file, unsigned int line);
@@ -277,8 +417,8 @@ void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks,
char* file, unsigned int line);
void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#else
-#define ERTS_SMP_CHK_NO_PROC_LOCKS
-#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P)
+#define ERTS_CHK_NO_PROC_LOCKS
+#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P)
#endif
#endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */
@@ -289,7 +429,6 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#ifndef ERTS_PROCESS_LOCK_H__
#define ERTS_PROCESS_LOCK_H__
-#ifdef ERTS_SMP
typedef struct {
union {
@@ -306,21 +445,21 @@ typedef struct {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_read_band_nob(&(L)->flags, \
(erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_read_bor_acqb(&(L)->flags, \
(erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_cmpxchg_acqb(&(L)->flags, \
(erts_aint32_t) (NEW), \
(erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \
+ ((ErtsProcLocks) erts_atomic32_cmpxchg_relb(&(L)->flags, \
(erts_aint32_t) (NEW), \
(erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
- ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags))
+ ((ErtsProcLocks) erts_atomic32_read_nob(&(L)->flags))
#else /* no opt atomic ops */
@@ -391,22 +530,22 @@ ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *);
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *);
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
-ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_raw_trylock__(Process *p,
ErtsProcLocks locks);
#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
+ERTS_GLB_INLINE void erts_proc_lock_x__(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
char *file, unsigned int line);
#else
-ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
+ERTS_GLB_INLINE void erts_proc_lock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
#endif
-ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *,
+ERTS_GLB_INLINE void erts_proc_unlock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
-ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *,
+ERTS_GLB_INLINE int erts_proc_trylock__(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
@@ -434,7 +573,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
}
/*
- * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__.
+ * Helper function for erts_proc_lock__ and erts_proc_trylock__.
*
* Attempts to grab all of 'locks' simultaneously.
*
@@ -447,7 +586,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
* Does not release the pix lock.
*/
ERTS_GLB_INLINE ErtsProcLocks
-erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
+erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
ErtsProcLocks expct_lflgs = 0;
@@ -475,9 +614,6 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MAIN)
if (erts_mtx_trylock(&p->lock.main) == EBUSY)
goto busy_main;
- if (locks & ERTS_PROC_LOCK_LINK)
- if (erts_mtx_trylock(&p->lock.link) == EBUSY)
- goto busy_link;
if (locks & ERTS_PROC_LOCK_MSGQ)
if (erts_mtx_trylock(&p->lock.msgq) == EBUSY)
goto busy_msgq;
@@ -503,9 +639,6 @@ busy_btm:
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
busy_msgq:
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_mtx_unlock(&p->lock.link);
-busy_link:
if (locks & ERTS_PROC_LOCK_MAIN)
erts_mtx_unlock(&p->lock.main);
busy_main:
@@ -516,12 +649,12 @@ busy_main:
ERTS_GLB_INLINE void
#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_proc_lock_x__(Process *p,
+erts_proc_lock_x__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks,
char *file, unsigned int line)
#else
-erts_smp_proc_lock__(Process *p,
+erts_proc_lock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
#endif
@@ -543,7 +676,7 @@ erts_smp_proc_lock__(Process *p,
erts_proc_lc_lock(p, locks, file, line);
#endif
- old_lflgs = erts_smp_proc_raw_trylock__(p, locks);
+ old_lflgs = erts_proc_raw_trylock__(p, locks);
if (old_lflgs != 0) {
/*
@@ -576,8 +709,6 @@ erts_smp_proc_lock__(Process *p,
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_mtx_lock(&p->lock.main);
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_mtx_lock(&p->lock.link);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_lock(&p->lock.msgq);
if (locks & ERTS_PROC_LOCK_BTM)
@@ -595,7 +726,7 @@ erts_smp_proc_lock__(Process *p,
}
ERTS_GLB_INLINE void
-erts_smp_proc_unlock__(Process *p,
+erts_proc_unlock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
@@ -679,8 +810,6 @@ erts_smp_proc_unlock__(Process *p,
erts_mtx_unlock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
- if (locks & ERTS_PROC_LOCK_LINK)
- erts_mtx_unlock(&p->lock.link);
if (locks & ERTS_PROC_LOCK_MAIN)
erts_mtx_unlock(&p->lock.main);
#endif
@@ -688,7 +817,7 @@ erts_smp_proc_unlock__(Process *p,
}
ERTS_GLB_INLINE int
-erts_smp_proc_trylock__(Process *p,
+erts_proc_trylock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
@@ -709,7 +838,7 @@ erts_smp_proc_trylock__(Process *p,
erts_pix_lock(pix_lck);
#endif
- if (erts_smp_proc_raw_trylock__(p, locks) != 0) {
+ if (erts_proc_raw_trylock__(p, locks) != 0) {
/* Didn't get all locks... */
res = EBUSY;
@@ -746,7 +875,7 @@ erts_smp_proc_trylock__(Process *p,
return res;
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- if (erts_smp_proc_raw_trylock__(p, locks) != 0)
+ if (erts_proc_raw_trylock__(p, locks) != 0)
return EBUSY;
else {
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -767,11 +896,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
if (locks & lock) {
erts_aint32_t lock_count;
if (locked) {
- lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]);
+ lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 1);
}
else {
- lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]);
+ lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 0);
}
}
@@ -781,18 +910,20 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
#else
-ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE void erts_proc_lock(Process *, ErtsProcLocks);
#endif
-ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks);
-ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE void erts_proc_unlock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg);
ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
@@ -800,94 +931,91 @@ ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
ERTS_GLB_INLINE void
#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
+erts_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
#else
-erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
+erts_proc_lock(Process *p, ErtsProcLocks locks)
#endif
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_smp_proc_lock_x__(p,
+#if defined(ERTS_ENABLE_LOCK_POSITION)
+ erts_proc_lock_x__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
-#elif defined(ERTS_SMP)
- erts_smp_proc_lock__(p,
+#else
+ erts_proc_lock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks);
-#endif /*ERTS_SMP*/
+#endif /*ERTS_ENABLE_LOCK_POSITION*/
}
ERTS_GLB_INLINE void
-erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
+erts_proc_unlock(Process *p, ErtsProcLocks locks)
{
-#ifdef ERTS_SMP
- erts_smp_proc_unlock__(p,
+ erts_proc_unlock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
ERTS_GLB_INLINE int
-erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
+erts_proc_trylock(Process *p, ErtsProcLocks locks)
{
-#ifndef ERTS_SMP
- return 0;
-#else
- return erts_smp_proc_trylock__(p,
+ return erts_proc_trylock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p)
{
- ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
erts_ptab_atmc_inc_refc(&p->common);
-#else
- erts_ptab_inc_refc(&p->common);
-#endif
}
ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
{
Sint referred;
- ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
referred = erts_ptab_atmc_dec_test_refc(&p->common);
-#else
- referred = erts_ptab_dec_test_refc(&p->common);
-#endif
if (!referred) {
ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
}
}
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg)
+{
+ Sint referred;
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ referred = erts_ptab_atmc_dec_test_refc(&p->common);
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
+ (*func)(!0, arg);
+ erts_free_proc(p);
+ (*func)(0, arg);
+ }
+}
+
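
The new erts_proc_dec_refc_free_func() brackets the deallocation: the callback
runs with a nonzero argument immediately before erts_free_proc() and with zero
immediately after. A hypothetical caller (on_proc_free and its behavior are
illustrative, not part of the patch):

/* Hypothetical callback: the nonzero flag marks the call made just
 * before the process struct is freed; zero marks the call just after. */
static void on_proc_free(int before_free, void *arg) {
    if (before_free) {
        /* release anything that must not outlive the struct */
    } else {
        /* post-free bookkeeping */
    }
}

/* usage: erts_proc_dec_refc_free_func(p, on_proc_free, NULL); */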
ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
Sint referred;
- ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc);
-#else
- referred = erts_ptab_add_test_refc(&p->common, add_refc);
-#endif
if (!referred) {
ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
@@ -896,17 +1024,12 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p)
{
- ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
return erts_ptab_atmc_read_refc(&p->common);
-#else
- return erts_ptab_read_refc(&p->common);
-#endif
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_SMP
void erts_proc_lock_init(Process *);
void erts_proc_lock_fin(Process *);
void erts_proc_safelock(Process *a_proc,
@@ -915,7 +1038,6 @@ void erts_proc_safelock(Process *a_proc,
Process *b_proc,
ErtsProcLocks b_have_locks,
ErtsProcLocks b_need_locks);
-#endif
/*
* --- Process table lookup ------------------------------------------------
@@ -940,14 +1062,13 @@ void erts_proc_safelock(Process *a_proc,
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
+Process *erts_proc_lookup_inc_refc(Eterm pid);
+Process *erts_proc_lookup_raw_inc_refc(Eterm pid);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid);
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE
-#endif
Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -964,7 +1085,7 @@ ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid)
{
Process *proc;
- ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+ ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_pid(pid))
return NULL;
@@ -984,25 +1105,6 @@ ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid)
return proc;
}
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE Process *
-erts_pid2proc_opt(Process *c_p_unused,
- ErtsProcLocks c_p_have_locks_unused,
- Eterm pid,
- ErtsProcLocks pid_need_locks_unused,
- int flags)
-{
- Process *proc = erts_proc_lookup_raw(pid);
- if (!proc)
- return NULL;
- if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && ERTS_PROC_IS_EXITING(proc))
- return NULL;
- if (flags & ERTS_P2P_FLG_INC_REFC)
- erts_proc_inc_refc(proc);
- return proc;
-}
-#endif /* !ERTS_SMP */
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 22830a19c4..38c095fb4a 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -284,31 +284,31 @@ struct ErtsPTabListBifData_ {
static ERTS_INLINE void
last_data_init_nob(ErtsPTab *ptab, Uint64 val)
{
- erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val);
+ erts_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE void
last_data_set_relb(ErtsPTab *ptab, Uint64 val)
{
- erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val);
+ erts_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val);
}
static ERTS_INLINE Uint64
last_data_read_nob(ErtsPTab *ptab)
{
- return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data);
+ return (Uint64) erts_atomic64_read_nob(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_read_acqb(ErtsPTab *ptab)
{
- return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data);
+ return (Uint64) erts_atomic64_read_acqb(&ptab->vola.tile.last_data);
}
static ERTS_INLINE Uint64
last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
{
- return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data,
+ return (Uint64) erts_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data,
(erts_aint64_t) new,
(erts_aint64_t) exp);
}
@@ -346,9 +346,9 @@ ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix)
UWord
erts_ptab_mem_size(ErtsPTab *ptab)
{
- UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t);
+ UWord size = ptab->r.o.max*sizeof(erts_atomic_t);
if (ptab->r.o.free_id_data)
- size += ptab->r.o.max*sizeof(erts_smp_atomic32_t);
+ size += ptab->r.o.max*sizeof(erts_atomic32_t);
return size;
}
@@ -367,13 +367,14 @@ erts_ptab_init_table(ErtsPTab *ptab,
size_t tab_sz, alloc_sz;
Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines;
char *tab_end;
- erts_smp_atomic_t *tab_entry;
- erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
- erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
+ erts_atomic_t *tab_entry;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_atomic32_init_nob(&ptab->vola.tile.count, 0);
last_data_init_nob(ptab, ~((Uint64) 0));
/* A size that is a power of 2 is preferable performance-wise */
@@ -387,20 +388,20 @@ erts_ptab_init_table(ErtsPTab *ptab,
ptab->r.o.element_size = element_size;
ptab->r.o.max = size;
- tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic_t));
alloc_sz = tab_sz;
if (!legacy)
- alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
+ alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t));
ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz);
tab_end = ((char *) ptab->r.o.tab) + tab_sz;
tab_entry = ptab->r.o.tab;
while (tab_end > ((char *) tab_entry)) {
- erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
+ erts_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
tab_entry++;
}
tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
- ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t));
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic_t));
ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */
ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */
ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */
@@ -428,11 +429,11 @@ erts_ptab_init_table(ErtsPTab *ptab,
}
else {
- tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t));
- ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end;
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t));
+ ptab->r.o.free_id_data = (erts_atomic32_t *) tab_end;
tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
- ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t));
+ ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic32_t));
ptab->r.o.dix_cl_mask = tab_cache_lines-1;
ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1);
@@ -447,19 +448,19 @@ erts_ptab_init_table(ErtsPTab *ptab,
ix = 0;
for (cl = 0; cl < tab_cache_lines; cl++) {
for (cli = 0; cli < ix_per_cache_line; cli++) {
- erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix],
+ erts_atomic32_init_nob(&ptab->r.o.free_id_data[ix],
cli*tab_cache_lines+cl);
- ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data);
+ ASSERT(erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data);
ix++;
}
}
- erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1);
- erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1);
+ erts_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1);
+ erts_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1);
}
- erts_smp_interval_init(&ptab->list.data.interval);
+ erts_interval_init(&ptab->list.data.interval);
ptab->list.data.deleted.start = NULL;
ptab->list.data.deleted.end = NULL;
ptab->list.data.chunks = (((ptab->r.o.max - 1)
@@ -474,14 +475,14 @@ erts_ptab_init_table(ErtsPTab *ptab,
* we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
*
* In order to fix this, we insert a pointer from the table
- * to the invalid_element, wich will be interpreted as a
+ * to the invalid_element, which will be interpreted as a
* slot currently being modified. This way we will be able to
* have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
* still having a table size of the power of 2.
*/
- erts_smp_atomic32_inc_nob(&ptab->vola.tile.count);
+ erts_atomic32_inc_nob(&ptab->vola.tile.count);
pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data);
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix],
+ erts_atomic_set_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab->r.o.invalid_element);
}
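
The free-id initialization a few hunks above stores the value
cli*tab_cache_lines + cl at slot ix, so numerically consecutive id-data values
land in different cache lines of free_id_data, spreading contention between
concurrent allocators. A standalone printout of that layout (the table
dimensions here are made up for illustration):

/* Standalone illustration of the free_id_data interleaving; the
 * dimensions are invented for the printout. */
#include <stdio.h>

int main(void) {
    unsigned tab_cache_lines = 4, ix_per_cache_line = 8, ix = 0;
    unsigned cl, cli;

    for (cl = 0; cl < tab_cache_lines; cl++)
        for (cli = 0; cli < ix_per_cache_line; cli++)
            printf("slot %2u holds id-data %2u\n",
                   ix++, cli * tab_cache_lines + cl);
    return 0;
}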
@@ -505,12 +506,12 @@ erts_ptab_new_element(ErtsPTab *ptab,
erts_ptab_rlock(ptab);
- count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count);
+ count = erts_atomic32_inc_read_acqb(&ptab->vola.tile.count);
if (count > ptab->r.o.max) {
while (1) {
erts_aint32_t act_count;
- act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
+ act_count = erts_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
count-1,
count);
if (act_count == count) {
@@ -524,14 +525,14 @@ erts_ptab_new_element(ErtsPTab *ptab,
}
ptab_el->u.alive.started_interval
- = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ = erts_current_interval_nob(erts_ptab_interval(ptab));
if (ptab->r.o.free_id_data) {
do {
- ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix);
+ ix = (Uint32) erts_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix);
ix = ix_to_free_id_data_ix(ptab, ix);
- data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix],
+ data = erts_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix],
(erts_aint32_t)ptab->r.o.invalid_data);
}while ((Eterm)data == ptab->r.o.invalid_data);
@@ -545,10 +546,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
pix = erts_ptab_data2pix(ptab, (Eterm) data);
#ifdef DEBUG
- ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ ASSERT(ERTS_AINT_NULL == erts_atomic_xchg_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab_el));
#else
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
#endif
erts_ptab_runlock(ptab);
@@ -562,7 +563,7 @@ erts_ptab_new_element(ErtsPTab *ptab,
restart:
ptab_el->u.alive.started_interval
- = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ = erts_current_interval_nob(erts_ptab_interval(ptab));
ld = last_data_read_acqb(ptab);
@@ -570,10 +571,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
while (1) {
ld++;
pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
- if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix])
+ if (erts_atomic_read_nob(&ptab->r.o.tab[pix])
== ERTS_AINT_NULL) {
erts_aint_t val;
- val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ val = erts_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
invalid,
ERTS_AINT_NULL);
@@ -620,10 +621,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
/* Move into slot reserved */
#ifdef DEBUG
- ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ ASSERT(invalid == erts_atomic_xchg_relb(&ptab->r.o.tab[pix],
(erts_aint_t) ptab_el));
#else
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
#endif
if (rlocked)
@@ -643,7 +644,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
sizeof(ErtsPTabDeletedElement));
ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
&& ptab->list.data.deleted.end);
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
@@ -653,7 +654,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
ptdep->u.element.id = ptab_el->id;
ptdep->u.element.inserted = ptab_el->u.alive.started_interval;
ptdep->u.element.deleted =
- erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ erts_current_interval_nob(erts_ptab_interval(ptab));
ptab->list.data.deleted.end->next = ptdep;
ptab->list.data.deleted.end = ptdep;
@@ -677,7 +678,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
pix = erts_ptab_id2pix(ptab, ptab_el->id);
/* *Need* to be a managed thread */
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_ptab_rlock(ptab);
maybe_save = ptab->list.data.deleted.end != NULL;
@@ -686,7 +687,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
erts_ptab_rwlock(ptab);
}
- erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+ erts_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
if (ptab->r.o.free_id_data) {
Uint32 prev_data;
@@ -702,17 +703,17 @@ erts_ptab_delete_element(ErtsPTab *ptab,
ASSERT(pix == erts_ptab_data2pix(ptab, data));
do {
- ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix);
+ ix = (Uint32) erts_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix);
ix = ix_to_free_id_data_ix(ptab, ix);
- prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix],
+ prev_data = erts_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix],
data,
ptab->r.o.invalid_data);
}while ((Eterm)prev_data != ptab->r.o.invalid_data);
}
- ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
- erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
+ ASSERT(erts_atomic32_read_nob(&ptab->vola.tile.count) > 0);
+ erts_atomic32_dec_relb(&ptab->vola.tile.count);
if (!maybe_save)
erts_ptab_runlock(ptab);
@@ -733,7 +734,7 @@ erts_ptab_delete_element(ErtsPTab *ptab,
* erts_ptab_list() implements BIFs listing the content of the table,
* e.g. erlang:processes/0.
*/
-static void cleanup_ptab_list_bif_data(Binary *bp);
+static int cleanup_ptab_list_bif_data(Binary *bp);
static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
@@ -771,23 +772,23 @@ erts_ptab_list(Process *c_p, ErtsPTab *ptab)
}
else {
Eterm *hp;
- Eterm magic_bin;
+ Eterm magic_ref;
ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(c_p, PROC_BIN_SIZE);
- ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, ERTS_MAGIC_REF_THING_SIZE);
+ magic_ref = erts_mk_magic_ref(&hp, &MSO(c_p), mbp);
ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
ERTS_BIF_PREP_YIELD2(ret_val,
&ptab_list_continue_export,
c_p,
res_acc,
- magic_bin);
+ magic_ref);
}
return ret_val;
}
-static void
+static int
cleanup_ptab_list_bif_data(Binary *bp)
{
ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
@@ -875,6 +876,8 @@ cleanup_ptab_list_bif_data(Binary *bp)
ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+
+ return 1;
}
static int
@@ -924,7 +927,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
sizeof(ErtsPTabDeletedElement));
ptlbdp->bif_invocation->ix = -1;
ptlbdp->bif_invocation->u.bif_invocation.interval
- = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ = erts_step_interval_nob(erts_ptab_interval(ptab));
ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
ptlbdp->bif_invocation->next = NULL;
@@ -965,12 +968,12 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
locked = 1;
}
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table);
if (cix != 0)
ptlbdp->chunk[cix].interval
- = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ = erts_step_interval_nob(erts_ptab_interval(ptab));
else if (ptlbdp->bif_invocation)
ptlbdp->chunk[0].interval = *invocation_interval_p;
/* else: interval is irrelevant */
@@ -1287,9 +1290,7 @@ static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
res_acc = BIF_ARG_1;
- ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+ mbp = erts_magic_ref2bin(BIF_ARG_2);
ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_ptab_list_bif_data);
@@ -1330,18 +1331,18 @@ static void assert_ptab_consistency(ErtsPTab *ptab)
int null_slots = 0;
for (ix=0; ix < ptab->r.o.max; ix++) {
- if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) {
+ if (erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) {
++free_pids;
- data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]);
+ data = erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]);
pix = erts_ptab_data2pix(ptab, (Eterm) data);
ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL);
}
- if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) {
+ if (erts_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) {
++null_slots;
}
}
ASSERT(free_pids == null_slots);
- ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count));
+ ASSERT(free_pids == ptab->r.o.max - erts_atomic32_read_nob(&ptab->vola.tile.count));
}
#endif
}
@@ -1365,7 +1366,7 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
Uint32 i, max_ix, num, stop_id_ix;
max_ix = ptab->r.o.max - 1;
num = next;
- id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix);
+ id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix);
for (i=0; i <= max_ix; ++i) {
Uint32 pix;
@@ -1379,26 +1380,26 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
++id_ix;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num);
+ erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num);
ASSERT(pix == erts_ptab_data2pix(ptab, num));
}
}
- erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix);
+ erts_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix);
/* Write invalid_data in rest of free_id_data[]: */
- stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix;
+ stop_id_ix = (1 + erts_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix;
while (1) {
id_ix = (id_ix+1) & max_ix;
if (id_ix == stop_id_ix)
break;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix],
+ erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix],
ptab->r.o.invalid_data);
}
}
- id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1;
+ id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1;
dix = ix_to_free_id_data_ix(ptab, id_ix);
- res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]);
+ res = (Sint) erts_atomic32_read_nob(&ptab->r.o.free_id_data[dix]);
}
else {
/* Deprecated legacy algorithm... */
@@ -1615,11 +1616,11 @@ debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp)
static void
debug_ptab_list_check_del_list(ErtsPTab *ptab)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab));
if (!ptab->list.data.deleted.start)
ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end);
else {
- Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ Uint64 curr_interval = erts_current_interval_nob(erts_ptab_interval(ptab));
Uint64 *prev_x_interval_p = NULL;
ErtsPTabDeletedElement *ptdep;
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index fecfd96ab0..94f0247492 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_alloc.h"
-#include "erl_monitors.h"
+#include "erl_monitor_link.h"
#define ERTS_TRACER(P) ((P)->common.tracer)
#define ERTS_TRACER_MODULE(T) (CAR(list_val(T)))
@@ -44,6 +44,7 @@
#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
+#define ERTS_P_LT_MONITORS(P) ((P)->common.u.alive.lt_monitors)
#define IS_TRACED(p) \
(ERTS_TRACER(p) != NIL)
@@ -60,7 +61,7 @@ typedef struct {
} refc;
ErtsTracer tracer;
Uint trace_flags;
- erts_smp_atomic_t timer;
+ erts_atomic_t timer;
union {
/* --- While being alive --- */
struct {
@@ -68,6 +69,7 @@ typedef struct {
struct reg_proc *reg;
ErtsLink *links;
ErtsMonitor *monitors;
+ ErtsMonitor *lt_monitors;
} alive;
/* --- While being released --- */
@@ -78,7 +80,7 @@ typedef struct {
typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement;
typedef struct {
- erts_smp_rwmtx_t rwmtx;
+ erts_rwmtx_t rwmtx;
erts_interval_t interval;
struct {
ErtsPTabDeletedElement *start;
@@ -88,15 +90,15 @@ typedef struct {
} ErtsPTabListData;
typedef struct {
- erts_smp_atomic64_t last_data;
- erts_smp_atomic32_t count;
- erts_smp_atomic32_t aid_ix;
- erts_smp_atomic32_t fid_ix;
+ erts_atomic64_t last_data;
+ erts_atomic32_t count;
+ erts_atomic32_t aid_ix;
+ erts_atomic32_t fid_ix;
} ErtsPTabVolatileData;
typedef struct {
- erts_smp_atomic_t *tab;
- erts_smp_atomic32_t *free_id_data;
+ erts_atomic_t *tab;
+ erts_atomic32_t *free_id_data;
Uint32 max;
Uint32 pix_mask;
Uint32 pix_cl_mask;
@@ -223,8 +225,8 @@ ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab);
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab);
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab);
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -245,7 +247,7 @@ ERTS_GLB_INLINE int
erts_ptab_count(ErtsPTab *ptab)
{
int max = ptab->r.o.max;
- erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count);
+ erts_aint32_t res = erts_atomic32_read_nob(&ptab->vola.tile.count);
if (max == ERTS_PTAB_MAX_SIZE) {
max--;
res--;
@@ -352,25 +354,25 @@ erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_nob(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_ddrb(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_rb(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
{
ASSERT(0 <= ix && ix < ptab->r.o.max);
- return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
+ return erts_atomic_read_acqb(&ptab->r.o.tab[ix]);
}
ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el)
@@ -386,11 +388,9 @@ ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el)
ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el)
{
erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
-#ifdef ERTS_SMP
+ ERTS_LC_ASSERT(refc >= 0);
if (refc == 0)
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
-#endif
return (Sint) refc;
}
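
The pairing above is the classic reference-count reclamation idiom: every thread decrements with release ordering, and the single thread that observes zero issues an acquire barrier (LoadLoad|LoadStore) before reclaiming, so it sees all writes made under the references about to be dropped. A minimal sketch of the same idiom in portable C11 atomics; the obj_t type and obj_release() are hypothetical illustrations, not ERTS code:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_long refc;   /* starts at 1 for the creating thread */
    void *payload;
} obj_t;

static void obj_release(obj_t *o)
{
    /* Release-ordered decrement: prior writes to *o become visible
     * to whichever thread ends up freeing the object. */
    if (atomic_fetch_sub_explicit(&o->refc, 1, memory_order_release) == 1) {
        /* Acquire fence pairs with the releases above, mirroring the
         * ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore) in the hunk. */
        atomic_thread_fence(memory_order_acquire);
        free(o->payload);
        free(o);
    }
}
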
@@ -399,7 +399,7 @@ ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el
{
erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc,
(erts_aint_t) add_refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
+ ERTS_LC_ASSERT(refc >= 0);
return (Sint) refc;
}
@@ -417,7 +417,7 @@ ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
{
Sint refc = --ptab_el->refc.sint;
- ERTS_SMP_LC_ASSERT(refc >= 0);
+ ERTS_LC_ASSERT(refc >= 0);
return refc;
}
@@ -425,7 +425,7 @@ ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
Sint add_refc)
{
ptab_el->refc.sint += add_refc;
- ERTS_SMP_LC_ASSERT(ptab_el->refc.sint >= 0);
+ ERTS_LC_ASSERT(ptab_el->refc.sint >= 0);
return (Sint) ptab_el->refc.sint;
}
@@ -436,42 +436,42 @@ ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el)
ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab)
{
- return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx);
+ return erts_rwmtx_tryrlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_runlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rwlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab)
{
- return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
+ return erts_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
}
ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab)
{
- erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx);
+ erts_rwmtx_rwunlock(&ptab->list.data.rwmtx);
}
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab)
+ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab)
{
- return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
+ return erts_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
}
-ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab)
+ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab)
{
- return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
+ return erts_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
}
#endif
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
index 5fefaea978..e50abf5cec 100644
--- a/erts/emulator/beam/erl_rbtree.h
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2015. All Rights Reserved.
+ * Copyright Ericsson AB 2015-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,8 +50,14 @@
* - ERTS_RBT_GET_LEFT(T) - Get left child node.
* - ERTS_RBT_SET_LEFT(T, L) - Set left child node.
* - ERTS_RBT_GET_KEY(T) - Get key of node.
- * - ERTS_RBT_IS_LT(KX, KY) - Is key KX less than key KY?
- * - ERTS_RBT_IS_EQ(KX, KY) - Is key KX equal to key KY?
+ * Either:
+ * - ERTS_RBT_CMP_KEYS(KX, KY) - Compare keys: < 0, 0, or > 0.
+ * or:
+ * - ERTS_RBT_IS_LT(KX, KY) - Is key KX less than key KY?
+ * - ERTS_RBT_IS_EQ(KX, KY) - Is key KX equal to key KY?
+ *
+ * If ERTS_RBT_CMP_KEYS is defined, ERTS_RBT_IS_LT and
+ * ERTS_RBT_IS_EQ will be redefined using ERTS_RBT_CMP_KEYS.
*
* Optional defines:
*
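
A single three-way comparator thus replaces both predicates. A self-contained sketch of the derivation the header performs; the integer comparator is a hypothetical stand-in for a user-supplied ERTS_RBT_CMP_KEYS:

#include <stdio.h>

/* Stand-in comparator honoring the documented contract:
 * negative if KX < KY, zero if equal, positive if KX > KY. */
#define ERTS_RBT_CMP_KEYS(KX, KY) ((KX) < (KY) ? -1 : ((KX) > (KY) ? 1 : 0))

/* The redefinitions erl_rbtree.h applies when ERTS_RBT_CMP_KEYS is set: */
#define ERTS_RBT_IS_LT(KX, KY) (ERTS_RBT_CMP_KEYS((KX), (KY)) < 0)
#define ERTS_RBT_IS_EQ(KX, KY) (ERTS_RBT_CMP_KEYS((KX), (KY)) == 0)

int main(void)
{
    printf("IS_LT(1,2)=%d IS_EQ(2,2)=%d IS_LT(3,2)=%d\n",
           ERTS_RBT_IS_LT(1, 2), ERTS_RBT_IS_EQ(2, 2),
           ERTS_RBT_IS_LT(3, 2));     /* prints 1 1 0 */
    return 0;
}
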
@@ -105,7 +111,10 @@
* <ERTS_RBT_PREFIX>_rbt_yield_state_t.
*
* The yield state should be statically initialized by
- * ERTS_RBT_YIELD_STAT_INITER.
+ * ERTS_RBT_YIELD_STAT_INITER,
+ *
+ * or dynamically initialized with
+ * ERTS_RBT_YIELD_STAT_INIT(<ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate).
*
*
* The following API functions are implemented if corresponding
@@ -178,8 +187,8 @@
* Operate by calling the operator 'op' on each element.
* Order is undefined.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -195,8 +204,8 @@
* Order is undefined. Each element should be destroyed
* by 'op'.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed.
*
* 'arg' is passed as argument to 'op'.
@@ -228,8 +237,8 @@
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -244,8 +253,8 @@
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -296,8 +305,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -318,8 +327,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -334,6 +343,15 @@
 * Should only be used for debugging.
*/
+#ifdef ERTS_RBT_CMP_KEYS
+
+# undef ERTS_RBT_IS_LT
+# define ERTS_RBT_IS_LT(KX, KY) (ERTS_RBT_CMP_KEYS((KX), (KY)) < 0)
+
+# undef ERTS_RBT_IS_EQ
+# define ERTS_RBT_IS_EQ(KX, KY) (ERTS_RBT_CMP_KEYS((KX), (KY)) == 0)
+
+#endif
/*
* Check that we have all mandatory defines
@@ -393,6 +411,16 @@
# error Missing definition of ERTS_RBT_IS_EQ
#endif
+#undef ERTS_RBT_IS_GT__
+#ifdef ERTS_RBT_CMP_KEYS
+# define ERTS_RBT_IS_GT__(KX, KY) \
+ (ERTS_RBT_CMP_KEYS((KX), (KY)) > 0)
+#else
+# define ERTS_RBT_IS_GT__(KX, KY) \
+ (!ERTS_RBT_IS_LT((KX), (KY)) && !ERTS_RBT_IS_EQ((KX), (KY)))
+
+#endif
+
#if defined(ERTS_RBT_HARD_DEBUG) || defined(DEBUG)
# ifndef ERTS_RBT_DEBUG
# define ERTS_RBT_DEBUG 1
@@ -422,6 +450,13 @@
#ifndef ERTS_RBT_YIELD_STAT_INITER
# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
#endif
+#ifndef ERTS_RBT_YIELD_STAT_INIT
+# define ERTS_RBT_YIELD_STAT_INIT(YS) \
+ do { \
+ (YS)->x = NULL; \
+ (YS)->up = 0; \
+ } while (0)
+#endif
#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
X ## Y
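
Both initialization forms leave the yield state with x == NULL and up == 0. A minimal sketch, assuming a template instantiated with the hypothetical prefix mytree:

static void yield_state_init_sketch(void)
{
    /* Static initialization, as before... */
    mytree_rbt_yield_state_t ys1 = ERTS_RBT_YIELD_STAT_INITER;
    /* ...or the new dynamic form, for states living in allocated
     * context where an initializer cannot be used. */
    mytree_rbt_yield_state_t ys2;
    ERTS_RBT_YIELD_STAT_INIT(&ys2);
    (void) ys1;
    (void) ys2;
}
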
@@ -476,12 +511,12 @@ typedef struct {
#if defined(ERTS_RBT_HARD_DEBUG) \
&& (defined(ERTS_RBT_WANT_DELETE) \
|| defined(ERTS_RBT_NEED_INSERT__))
-static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root);
+static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *node);
# define ERTS_RBT_NEED_HDBG_CHECK_TREE__
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) \
- ERTS_RBT_FUNC__(hdbg_check_tree)((R))
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) \
+ ERTS_RBT_FUNC__(hdbg_check_tree)((R),(N))
#else
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) ((void) 1)
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) ((void) 1)
#endif
#ifdef ERTS_RBT_NEED_ROTATE__
@@ -634,7 +669,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
ERTS_RBT_INIT_EMPTY_TNODE(&null_x);
@@ -852,7 +887,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
}
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
}
@@ -982,7 +1017,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
{
ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n);
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
ERTS_RBT_INIT_EMPTY_TNODE(n);
@@ -997,19 +1032,30 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
ERTS_RBT_T *p, *x = *root;
while (1) {
- ERTS_RBT_KEY_T kx;
+ ERTS_RBT_KEY_T kx = ERTS_RBT_GET_KEY(x);
ERTS_RBT_T *c;
+ int kres;
+#ifdef ERTS_RBT_CMP_KEYS
+ int kcmp = ERTS_RBT_CMP_KEYS(kn, kx);
+ kres = kcmp == 0;
+#else
+ kres = ERTS_RBT_IS_EQ(kn, kx);
+#endif
- kx = ERTS_RBT_GET_KEY(x);
-
- if (lookup && ERTS_RBT_IS_EQ(kn, kx)) {
+ if (lookup && kres) {
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
return x;
}
- if (ERTS_RBT_IS_LT(kn, kx)) {
+#ifdef ERTS_RBT_CMP_KEYS
+ kres = kcmp < 0;
+#else
+ kres = ERTS_RBT_IS_LT(kn, kx);
+#endif
+
+ if (kres) {
c = ERTS_RBT_GET_LEFT(x);
if (!c) {
ERTS_RBT_SET_PARENT(n, x);
@@ -1038,7 +1084,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
ERTS_RBT_FUNC__(insert_fixup__)(root, n);
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
return NULL;
}
@@ -1065,6 +1111,101 @@ ERTS_RBT_FUNC__(insert)(ERTS_RBT_T **root, ERTS_RBT_T *n)
#endif /* ERTS_RBT_WANT_INSERT */
+#ifdef ERTS_RBT_WANT_LOOKUP_CREATE
+static ERTS_INLINE ERTS_RBT_T *
+ERTS_RBT_FUNC__(lookup_create)(ERTS_RBT_T **root,
+ ERTS_RBT_KEY_T kn,
+ ERTS_RBT_T *(*create)(ERTS_RBT_KEY_T, void *),
+ void *arg,
+ int *created)
+{
+ ERTS_RBT_T *n;
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
+
+ if (!*root) {
+ n = (*create)(kn, arg);
+ ERTS_RBT_INIT_EMPTY_TNODE(n);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(n), kn));
+ ERTS_RBT_SET_BLACK(n);
+ *root = n;
+ *created = !0;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(NULL, n);
+#endif
+ }
+ else {
+ ERTS_RBT_T *p, *x = *root;
+
+ while (1) {
+ ERTS_RBT_KEY_T kx = ERTS_RBT_GET_KEY(x);
+ ERTS_RBT_T *c;
+ int kres;
+#ifdef ERTS_RBT_CMP_KEYS
+ int kcmp = ERTS_RBT_CMP_KEYS(kn, kx);
+ kres = kcmp == 0;
+#else
+ kres = ERTS_RBT_IS_EQ(kn, kx);
+#endif
+
+ if (kres) {
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
+
+ *created = 0;
+ return x;
+ }
+
+#ifdef ERTS_RBT_CMP_KEYS
+ kres = kcmp < 0;
+#else
+ kres = ERTS_RBT_IS_LT(kn, kx);
+#endif
+
+ if (kres) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c) {
+ n = (*create)(kn, arg);
+ ERTS_RBT_INIT_EMPTY_TNODE(n);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(n), kn));
+ *created = !0;
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_LEFT(x, n);
+ p = x;
+ break;
+ }
+ }
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c) {
+ n = (*create)(kn, arg);
+ ERTS_RBT_INIT_EMPTY_TNODE(n);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(n), kn));
+ *created = !0;
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_RIGHT(x, n);
+ p = x;
+ break;
+ }
+ }
+
+ x = c;
+ }
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(n), kn));
+ ERTS_RBT_ASSERT(p);
+
+ ERTS_RBT_SET_RED(n);
+ if (ERTS_RBT_IS_RED(p))
+ ERTS_RBT_FUNC__(insert_fixup__)(root, n);
+ }
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
+
+ return n;
+}
+
+#endif /* ERTS_RBT_WANT_LOOKUP_CREATE */
+
#ifdef ERTS_RBT_WANT_LOOKUP
static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
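
A usage sketch for the lookup_create() added above: the callback constructs a node only when the key is missing, and *created tells the caller which case occurred. The mytree_t node type, prefix, and malloc()-based allocation are hypothetical stand-ins:

#include <stdlib.h>

static mytree_t *my_create(Sint key, void *arg)
{
    /* malloc() stands in for the appropriate erts allocator. */
    mytree_t *n = malloc(sizeof(mytree_t));
    n->key = key;          /* lookup_create() initializes the tree links */
    (void) arg;
    return n;
}

static mytree_t *get_or_insert(mytree_t **root, Sint key)
{
    int created;
    mytree_t *n = mytree_rbt_lookup_create(root, key, my_create,
                                           NULL, &created);
    /* created != 0 iff my_create() ran and n was linked into the tree */
    return n;
}
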
@@ -1078,11 +1219,24 @@ ERTS_RBT_FUNC__(lookup)(ERTS_RBT_T *root, ERTS_RBT_KEY_T key)
while (1) {
ERTS_RBT_KEY_T kx = ERTS_RBT_GET_KEY(x);
ERTS_RBT_T *c;
+ int kres;
+#ifdef ERTS_RBT_CMP_KEYS
+ int kcmp = ERTS_RBT_CMP_KEYS(key, kx);
+ kres = kcmp == 0;
+#else
+ kres = ERTS_RBT_IS_EQ(key, kx);
+#endif
- if (ERTS_RBT_IS_EQ(key, kx))
+ if (kres)
return x;
- if (ERTS_RBT_IS_LT(key, kx)) {
+#ifdef ERTS_RBT_CMP_KEYS
+ kres = kcmp < 0;
+#else
+ kres = ERTS_RBT_IS_LT(key, kx);
+#endif
+
+ if (kres) {
c = ERTS_RBT_GET_LEFT(x);
if (!c)
return NULL;
@@ -1364,7 +1518,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
ystate->x = NULL;
ystate->up = 0;
}
- return 1; /* Done */
+ return 0; /* Done */
}
x = p;
}
@@ -1416,14 +1570,14 @@ ERTS_RBT_FUNC__(foreach_large)(ERTS_RBT_T *root,
#ifdef ERTS_RBT_WANT_FOREACH_YIELDING
-static ERTS_RBT_API_INLINE__ void
+static ERTS_RBT_API_INLINE__ int
ERTS_RBT_FUNC__(foreach_yielding)(ERTS_RBT_T *root,
void (*op)(ERTS_RBT_T *, void *),
void *arg,
ERTS_RBT_YIELD_STATE_T__ *ystate,
Sint ylimit)
{
- (void) ERTS_RBT_FUNC__(foreach_unordered__)(*root, 0, op, arg,
+ return ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
1, ystate, ylimit);
}
@@ -1579,15 +1733,17 @@ ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent,
#ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__
static void
-ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
+ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n)
{
int black_depth = -1, no_black = 0;
ERTS_RBT_T *c, *p, *x = root;
ERTS_RBT_KEY_T kx;
ERTS_RBT_KEY_T kc;
- if (!x)
+ if (!x) {
+ ERTS_RBT_ASSERT(!n);
return;
+ }
ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x));
@@ -1597,6 +1753,9 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
while (1) {
+ if (x == n)
+ n = NULL;
+
if (ERTS_RBT_IS_BLACK(x))
no_black++;
else {
@@ -1615,8 +1774,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
kx = ERTS_RBT_GET_KEY(x);
kc = ERTS_RBT_GET_KEY(c);
- ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kc, kx)
- || ERTS_RBT_IS_EQ(kc, kx));
+ ERTS_RBT_ASSERT(!ERTS_RBT_IS_GT__(kc, kx));
x = c;
}
@@ -1634,8 +1792,8 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
kx = ERTS_RBT_GET_KEY(x);
kc = ERTS_RBT_GET_KEY(c);
- ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
- || ERTS_RBT_IS_EQ(kx, kc));
+ ERTS_RBT_ASSERT(!ERTS_RBT_IS_GT__(kx, kc));
+
x = c;
}
@@ -1657,8 +1815,8 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
kx = ERTS_RBT_GET_KEY(x);
kc = ERTS_RBT_GET_KEY(c);
- ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
- || ERTS_RBT_IS_EQ(kx, kc));
+ ERTS_RBT_ASSERT(!ERTS_RBT_IS_GT__(kx, kc));
+
/* Go down tree of x's sibling... */
x = c;
break;
@@ -1668,6 +1826,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
if (!p) {
ERTS_RBT_ASSERT(root == x);
ERTS_RBT_ASSERT(no_black == 0);
+ ERTS_RBT_ASSERT(!n);
return; /* Done */
}
@@ -1691,6 +1850,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
#undef ERTS_RBT_NEED_FOREACH_ORDERED__
#undef ERTS_RBT_NEED_HDBG_CHECK_TREE__
#undef ERTS_RBT_HDBG_CHECK_TREE__
+#undef ERTS_RBT_IS_GT__
#ifdef ERTS_RBT_UNDEF
# undef ERTS_RBT_PREFIX
@@ -1711,6 +1871,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
# undef ERTS_RBT_GET_LEFT
# undef ERTS_RBT_SET_LEFT
# undef ERTS_RBT_GET_KEY
+# undef ERTS_RBT_CMP_KEYS
# undef ERTS_RBT_IS_LT
# undef ERTS_RBT_IS_EQ
# undef ERTS_RBT_UNDEF
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index cab4bd73db..9766e76a83 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,13 +32,12 @@
# include "config.h"
#endif
-#ifdef ERTS_SMP
#include "erl_process.h"
#include "erl_thr_progress.h"
erts_sspa_data_t *
-erts_sspa_create(size_t blk_sz, int pa_size)
+erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
erts_sspa_data_t *data;
size_t tot_size;
@@ -48,23 +47,40 @@ erts_sspa_create(size_t blk_sz, int pa_size)
int cix;
int no_blocks = pa_size;
int no_blocks_per_chunk;
+ size_t aligned_blk_sz;
- if (erts_no_schedulers == 1)
+#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
+ /* Force 64-bit alignment... */
+ aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
+#else
+ /* Alignment of structure is enough... */
+ aligned_blk_sz = blk_sz;
+#endif
+
+ if (!name) { /* schedulers only variant */
+ ASSERT(!nthreads);
+ nthreads = erts_no_schedulers;
+ }
+ else {
+ ASSERT(nthreads > 0);
+ }
+
+ if (nthreads == 1)
no_blocks_per_chunk = no_blocks;
else {
int extra = (no_blocks - 1)/4 + 1;
if (extra == 0)
extra = 1;
no_blocks_per_chunk = no_blocks;
- no_blocks_per_chunk += extra*erts_no_schedulers;
- no_blocks_per_chunk /= erts_no_schedulers;
+ no_blocks_per_chunk += extra * nthreads;
+ no_blocks_per_chunk /= nthreads;
}
- no_blocks = no_blocks_per_chunk * erts_no_schedulers;
+ no_blocks = no_blocks_per_chunk * nthreads;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
- chunk_mem_size += blk_sz * no_blocks_per_chunk;
+ chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
- tot_size += chunk_mem_size*erts_no_schedulers;
+ tot_size += chunk_mem_size * nthreads;
p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
data = (erts_sspa_data_t *) p;
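
The round-up is plain integer arithmetic: for blk_sz > 0, ((blk_sz - 1) / 8) * 8 + 8 is the smallest multiple of 8 that is >= blk_sz. A self-contained sketch of the arithmetic (values chosen arbitrarily):

#include <stdio.h>

static size_t align8(size_t blk_sz)
{
    /* Smallest multiple of 8 not less than blk_sz, as in the hunk above. */
    return ((blk_sz - 1) / 8) * 8 + 8;
}

int main(void)
{
    printf("%zu %zu %zu\n", align8(1), align8(8), align8(17)); /* 8 8 24 */
    return 0;
}

The two call variants follow from the new parameters: erts_sspa_create(blk_sz, pa_size, 0, NULL) keeps the old schedulers-only behavior (nthreads falls back to erts_no_schedulers), while a call such as erts_sspa_create(blk_sz, pa_size, 4, "my_pre_alloc") — thread count and name invented for illustration — sets up a pool for four explicitly managed threads, identified via the thread-specific-data key created below.
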
@@ -73,10 +89,16 @@ erts_sspa_create(size_t blk_sz, int pa_size)
data->chunks_mem_size = chunk_mem_size;
data->start = chunk_start;
- data->end = chunk_start + chunk_mem_size*erts_no_schedulers;
+ data->end = chunk_start + chunk_mem_size * nthreads;
+ data->nthreads = nthreads;
+
+ if (name) { /* thread variant */
+ erts_tsd_key_create(&data->tsd_key, (char*)name);
+ erts_atomic_init_nob(&data->id_generator, 0);
+ }
/* Initialize all chunks */
- for (cix = 0; cix < erts_no_schedulers; cix++) {
+ for (cix = 0; cix < nthreads; cix++) {
erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
erts_sspa_blk_t *blk;
@@ -102,7 +124,7 @@ erts_sspa_create(size_t blk_sz, int pa_size)
blk = (erts_sspa_blk_t *) p;
for (i = 0; i < no_blocks_per_chunk; i++) {
blk = (erts_sspa_blk_t *) p;
- p += blk_sz;
+ p += aligned_blk_sz;
blk->next_ptr = (erts_sspa_blk_t *) p;
}
@@ -161,7 +183,7 @@ enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
if ((i & 1) == 0)
itmp = itmp2;
else {
- enq = (erts_sspa_blk_t *) itmp;
+ enq = (erts_sspa_blk_t *) itmp2;
itmp = erts_atomic_read_acqb(&enq->next_atmc);
ASSERT(itmp != ERTS_AINT_NULL);
}
@@ -325,4 +347,3 @@ erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
return res;
}
-#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index 7808d7d438..74cc966cbe 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,7 +31,6 @@
#ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__
#define ERTS_SCHED_SPEC_PRE_ALLOC_H__
-#ifdef ERTS_SMP
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -60,6 +59,11 @@ typedef struct {
char *start;
char *end;
int chunks_mem_size;
+ int nthreads;
+
+ /* Used only by thread variant: */
+ erts_tsd_key_t tsd_key;
+ erts_atomic_t id_generator;
} erts_sspa_data_t;
typedef union erts_sspa_blk_t_ erts_sspa_blk_t;
@@ -141,7 +145,9 @@ check_local_list(erts_sspa_chunk_header_t *chdr)
#endif
erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
- int pa_size);
+ int pa_size,
+ int nthreads,
+ const char* name);
void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
erts_sspa_blk_t *blk,
int cinit);
@@ -159,7 +165,7 @@ ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk);
ERTS_GLB_INLINE erts_sspa_chunk_t *
erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix)
{
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size);
}
@@ -172,7 +178,7 @@ erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr)
return -1;
diff = ((char *) ptr) - data->start;
cix = (int) diff / data->chunks_mem_size;
- ASSERT(0 <= cix && cix < erts_no_schedulers);
+ ASSERT(0 <= cix && cix < data->nthreads);
return cix;
}
@@ -182,6 +188,7 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
erts_sspa_chunk_t *chnk;
erts_sspa_chunk_header_t *chdr;
erts_sspa_blk_t *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_ALLOC);
chnk = erts_sspa_cix2chunk(data, cix);
chdr = &chnk->aligned.header;
@@ -195,11 +202,15 @@ erts_sspa_alloc(erts_sspa_data_t *data, int cix)
chdr->local.last = NULL;
ERTS_SSPA_DBG_CHK_LCL(chdr);
}
- if (chdr->local.cnt <= chdr->local.lim)
- return (char *) erts_sspa_process_remote_frees(chdr, res);
+ if (chdr->local.cnt <= chdr->local.lim) {
+ res = erts_sspa_process_remote_frees(chdr, res);
+ ERTS_MSACC_POP_STATE_M_X();
+ return (char*) res;
+ }
else if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
chdr->head.no_thr_progress_check++;
ASSERT(res);
+ ERTS_MSACC_POP_STATE_M_X();
return (char *) res;
}
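
The ERTS_MSACC_PUSH_AND_SET_STATE_M_X/ERTS_MSACC_POP_STATE_M_X pair charges time spent here to the 'alloc' microstate; the pop appears twice because every return path must restore the caller's state. A sketch of the same pattern in a hypothetical wrapper:

static char *sketch_alloc(erts_sspa_data_t *data, int cix)
{
    char *blk;
    /* The push macro declares its own saved-state variable, so it
     * belongs with the declarations; it enters the 'alloc' state. */
    ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_ALLOC);
    blk = erts_sspa_alloc(data, cix);
    /* Restore the previous microstate before the (single) return. */
    ERTS_MSACC_POP_STATE_M_X();
    return blk;
}
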
@@ -236,6 +247,5 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
deleted file mode 100644
index 713ed50b86..0000000000
--- a/erts/emulator/beam/erl_smp.h
+++ /dev/null
@@ -1,1640 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * SMP interface to ethread library.
- * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h",
- * plus changes to NOP operations when ERTS_SMP is disabled.
- * Author: Mikael Pettersson
- */
-#ifndef ERL_SMP_H
-#define ERL_SMP_H
-#include "erl_threads.h"
-
-#ifdef ERTS_ENABLE_LOCK_POSITION
-#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__)
-#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__)
-#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
-#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
-#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
-#endif
-
-
-#ifdef ERTS_SMP
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER
-typedef erts_thr_opts_t erts_smp_thr_opts_t;
-typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
-typedef erts_tid_t erts_smp_tid_t;
-typedef erts_mtx_t erts_smp_mtx_t;
-typedef erts_cnd_t erts_smp_cnd_t;
-#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER
-#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL
-#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ
-#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \
- ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ
-#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED
-#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED
-#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED
-typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
-typedef erts_rwmtx_t erts_smp_rwmtx_t;
-typedef erts_tsd_key_t erts_smp_tsd_key_t;
-#define erts_smp_dw_atomic_t erts_dw_atomic_t
-#define erts_smp_atomic_t erts_atomic_t
-#define erts_smp_atomic32_t erts_atomic32_t
-#define erts_smp_atomic64_t erts_atomic64_t
-typedef erts_spinlock_t erts_smp_spinlock_t;
-typedef erts_rwlock_t erts_smp_rwlock_t;
-void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
-
-#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER
-#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER
-#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER
-#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#else /* #ifdef ERTS_SMP */
-
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
-typedef int erts_smp_thr_opts_t;
-typedef int erts_smp_thr_init_data_t;
-typedef int erts_smp_tid_t;
-typedef int erts_smp_mtx_t;
-typedef int erts_smp_cnd_t;
-#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
-#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_LONG_LIVED 0
-#define ERTS_SMP_RWMTX_SHORT_LIVED 0
-#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_smp_rwmtx_opt_t;
-typedef int erts_smp_rwmtx_t;
-typedef int erts_smp_tsd_key_t;
-#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
-#define erts_smp_atomic_t erts_no_atomic_t
-#define erts_smp_atomic32_t erts_no_atomic32_t
-#define erts_smp_atomic64_t erts_no_atomic64_t
-#if __GNUC__ > 2
-typedef struct { } erts_smp_spinlock_t;
-typedef struct { } erts_smp_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
-#endif
-
-#define ERTS_SMP_MEMORY_BARRIER
-#define ERTS_SMP_WRITE_MEMORY_BARRIER
-#define ERTS_SMP_READ_MEMORY_BARRIER
-#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#endif /* #ifdef ERTS_SMP */
-
-ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
-ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid,
- void * (*func)(void *),
- void *arg,
- erts_smp_thr_opts_t *opts);
-ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res);
-ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid);
-ERTS_GLB_INLINE void erts_smp_thr_exit(void *res);
-ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void));
-ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void);
-ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
-#ifdef ERTS_HAVE_REC_MTX_INIT
-#define ERTS_SMP_HAVE_REC_MTX_INIT 1
-ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
- erts_smp_mtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
-ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
-#endif
-ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
-ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
- char *name);
-ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
-#endif
-ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
- char *name);
-ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
-#ifdef ERTS_ENABLE_LOCK_POSITION
-ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
-ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
-#else
-ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
-#endif
-ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp,
- char *keyname);
-ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
-ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
-ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
-
-#ifdef ERTS_THR_HAVE_SIG_FUNCS
-#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
-ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how,
- const sigset_t *set,
- sigset_t *oset);
-ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
-#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
-
-/*
- * See "Documentation of atomics and memory barriers" at the top
- * of erl_threads.h for info on atomics.
- */
-
-#ifdef ERTS_SMP
-
-/* Double word size atomics */
-
-#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob
-#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob
-#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob
-#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob
-
-#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb
-#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb
-#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb
-#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb
-
-#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb
-#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb
-#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb
-#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb
-
-#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb
-#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb
-#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb
-#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb
-
-#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb
-#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb
-#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb
-#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb
-
-#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb
-#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb
-#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb
-#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb
-
-#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb
-#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb
-#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb
-#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb
-
-#define erts_smp_dw_atomic_set_dirty erts_dw_atomic_set_dirty
-#define erts_smp_dw_atomic_read_dirty erts_dw_atomic_read_dirty
-
-/* Word size atomics */
-
-#define erts_smp_atomic_init_nob erts_atomic_init_nob
-#define erts_smp_atomic_set_nob erts_atomic_set_nob
-#define erts_smp_atomic_read_nob erts_atomic_read_nob
-#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob
-#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob
-#define erts_smp_atomic_inc_nob erts_atomic_inc_nob
-#define erts_smp_atomic_dec_nob erts_atomic_dec_nob
-#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob
-#define erts_smp_atomic_add_nob erts_atomic_add_nob
-#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob
-#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob
-#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob
-#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob
-#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob
-
-#define erts_smp_atomic_init_mb erts_atomic_init_mb
-#define erts_smp_atomic_set_mb erts_atomic_set_mb
-#define erts_smp_atomic_read_mb erts_atomic_read_mb
-#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb
-#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb
-#define erts_smp_atomic_inc_mb erts_atomic_inc_mb
-#define erts_smp_atomic_dec_mb erts_atomic_dec_mb
-#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb
-#define erts_smp_atomic_add_mb erts_atomic_add_mb
-#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb
-#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb
-#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb
-#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb
-#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb
-
-#define erts_smp_atomic_init_acqb erts_atomic_init_acqb
-#define erts_smp_atomic_set_acqb erts_atomic_set_acqb
-#define erts_smp_atomic_read_acqb erts_atomic_read_acqb
-#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb
-#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb
-#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb
-#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb
-#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb
-#define erts_smp_atomic_add_acqb erts_atomic_add_acqb
-#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb
-#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb
-#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb
-#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb
-#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb
-
-#define erts_smp_atomic_init_relb erts_atomic_init_relb
-#define erts_smp_atomic_set_relb erts_atomic_set_relb
-#define erts_smp_atomic_read_relb erts_atomic_read_relb
-#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb
-#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb
-#define erts_smp_atomic_inc_relb erts_atomic_inc_relb
-#define erts_smp_atomic_dec_relb erts_atomic_dec_relb
-#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb
-#define erts_smp_atomic_add_relb erts_atomic_add_relb
-#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb
-#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb
-#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb
-#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb
-#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb
-
-#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb
-#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb
-#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb
-#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb
-#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb
-#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb
-#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb
-#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb
-#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb
-#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb
-#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb
-#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb
-#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb
-#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb
-
-#define erts_smp_atomic_init_rb erts_atomic_init_rb
-#define erts_smp_atomic_set_rb erts_atomic_set_rb
-#define erts_smp_atomic_read_rb erts_atomic_read_rb
-#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb
-#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb
-#define erts_smp_atomic_inc_rb erts_atomic_inc_rb
-#define erts_smp_atomic_dec_rb erts_atomic_dec_rb
-#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb
-#define erts_smp_atomic_add_rb erts_atomic_add_rb
-#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb
-#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb
-#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb
-#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb
-#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb
-
-#define erts_smp_atomic_init_wb erts_atomic_init_wb
-#define erts_smp_atomic_set_wb erts_atomic_set_wb
-#define erts_smp_atomic_read_wb erts_atomic_read_wb
-#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb
-#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb
-#define erts_smp_atomic_inc_wb erts_atomic_inc_wb
-#define erts_smp_atomic_dec_wb erts_atomic_dec_wb
-#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb
-#define erts_smp_atomic_add_wb erts_atomic_add_wb
-#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb
-#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb
-#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb
-#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb
-#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb
-
-#define erts_smp_atomic_set_dirty erts_atomic_set_dirty
-#define erts_smp_atomic_read_dirty erts_atomic_read_dirty
-
-/* 32-bit atomics */
-
-#define erts_smp_atomic32_init_nob erts_atomic32_init_nob
-#define erts_smp_atomic32_set_nob erts_atomic32_set_nob
-#define erts_smp_atomic32_read_nob erts_atomic32_read_nob
-#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob
-#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob
-#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob
-#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob
-#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob
-#define erts_smp_atomic32_add_nob erts_atomic32_add_nob
-#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob
-#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob
-#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob
-#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob
-#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob
-
-#define erts_smp_atomic32_init_mb erts_atomic32_init_mb
-#define erts_smp_atomic32_set_mb erts_atomic32_set_mb
-#define erts_smp_atomic32_read_mb erts_atomic32_read_mb
-#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb
-#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb
-#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb
-#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb
-#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb
-#define erts_smp_atomic32_add_mb erts_atomic32_add_mb
-#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb
-#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb
-#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb
-#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb
-#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb
-
-#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb
-#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb
-#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb
-#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb
-#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb
-#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb
-#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb
-#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb
-#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb
-#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb
-#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb
-#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb
-#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb
-#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb
-
-#define erts_smp_atomic32_init_relb erts_atomic32_init_relb
-#define erts_smp_atomic32_set_relb erts_atomic32_set_relb
-#define erts_smp_atomic32_read_relb erts_atomic32_read_relb
-#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb
-#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb
-#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb
-#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb
-#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb
-#define erts_smp_atomic32_add_relb erts_atomic32_add_relb
-#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb
-#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb
-#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb
-#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb
-#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb
-
-#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb
-#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb
-#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb
-#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb
-#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb
-#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb
-#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb
-#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb
-#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb
-#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb
-#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb
-#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb
-#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb
-#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb
-
-#define erts_smp_atomic32_init_rb erts_atomic32_init_rb
-#define erts_smp_atomic32_set_rb erts_atomic32_set_rb
-#define erts_smp_atomic32_read_rb erts_atomic32_read_rb
-#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb
-#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb
-#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb
-#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb
-#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb
-#define erts_smp_atomic32_add_rb erts_atomic32_add_rb
-#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb
-#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb
-#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb
-#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb
-#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb
-
-#define erts_smp_atomic32_init_wb erts_atomic32_init_wb
-#define erts_smp_atomic32_set_wb erts_atomic32_set_wb
-#define erts_smp_atomic32_read_wb erts_atomic32_read_wb
-#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb
-#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb
-#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb
-#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb
-#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb
-#define erts_smp_atomic32_add_wb erts_atomic32_add_wb
-#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb
-#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb
-#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb
-#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb
-#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb
-
-#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty
-#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty
-
-/* 64-bit atomics */
-
-#define erts_smp_atomic64_init_nob erts_atomic64_init_nob
-#define erts_smp_atomic64_set_nob erts_atomic64_set_nob
-#define erts_smp_atomic64_read_nob erts_atomic64_read_nob
-#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob
-#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob
-#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob
-#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob
-#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob
-#define erts_smp_atomic64_add_nob erts_atomic64_add_nob
-#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob
-#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob
-#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob
-#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob
-#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob
-
-#define erts_smp_atomic64_init_mb erts_atomic64_init_mb
-#define erts_smp_atomic64_set_mb erts_atomic64_set_mb
-#define erts_smp_atomic64_read_mb erts_atomic64_read_mb
-#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb
-#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb
-#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb
-#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb
-#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb
-#define erts_smp_atomic64_add_mb erts_atomic64_add_mb
-#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb
-#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb
-#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb
-#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb
-#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb
-
-#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb
-#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb
-#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb
-#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb
-#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb
-#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb
-#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb
-#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb
-#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb
-#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb
-#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb
-#define erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb
-#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb
-#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb
-
-#define erts_smp_atomic64_init_relb erts_atomic64_init_relb
-#define erts_smp_atomic64_set_relb erts_atomic64_set_relb
-#define erts_smp_atomic64_read_relb erts_atomic64_read_relb
-#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb
-#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb
-#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb
-#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb
-#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb
-#define erts_smp_atomic64_add_relb erts_atomic64_add_relb
-#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb
-#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb
-#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb
-#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb
-#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb
-
-#define erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb
-#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb
-#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb
-#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb
-#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb
-#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb
-#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb
-#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb
-#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb
-#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb
-#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb
-#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb
-#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb
-#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb
-
-#define erts_smp_atomic64_init_rb erts_atomic64_init_rb
-#define erts_smp_atomic64_set_rb erts_atomic64_set_rb
-#define erts_smp_atomic64_read_rb erts_atomic64_read_rb
-#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb
-#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb
-#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb
-#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb
-#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb
-#define erts_smp_atomic64_add_rb erts_atomic64_add_rb
-#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb
-#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb
-#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb
-#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb
-#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb
-
-#define erts_smp_atomic64_init_wb erts_atomic64_init_wb
-#define erts_smp_atomic64_set_wb erts_atomic64_set_wb
-#define erts_smp_atomic64_read_wb erts_atomic64_read_wb
-#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb
-#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb
-#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb
-#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb
-#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb
-#define erts_smp_atomic64_add_wb erts_atomic64_add_wb
-#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb
-#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb
-#define erts_smp_atomic64_xchg_wb erts_atomic64_xchg_wb
-#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb
-#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb
-
-#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty
-#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty
-
-#else /* !ERTS_SMP */
-
-/* Double word size atomics */
-
-#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_dirty erts_no_dw_atomic_read
-
-/* Word size atomics */
-
-#define erts_smp_atomic_init_nob erts_no_atomic_set
-#define erts_smp_atomic_set_nob erts_no_atomic_set
-#define erts_smp_atomic_read_nob erts_no_atomic_read
-#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_nob erts_no_atomic_inc
-#define erts_smp_atomic_dec_nob erts_no_atomic_dec
-#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_smp_atomic_add_nob erts_no_atomic_add
-#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_mb erts_no_atomic_set
-#define erts_smp_atomic_set_mb erts_no_atomic_set
-#define erts_smp_atomic_read_mb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_mb erts_no_atomic_inc
-#define erts_smp_atomic_dec_mb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_smp_atomic_add_mb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_acqb erts_no_atomic_set
-#define erts_smp_atomic_set_acqb erts_no_atomic_set
-#define erts_smp_atomic_read_acqb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_acqb erts_no_atomic_inc
-#define erts_smp_atomic_dec_acqb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_smp_atomic_add_acqb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_relb erts_no_atomic_set
-#define erts_smp_atomic_set_relb erts_no_atomic_set
-#define erts_smp_atomic_read_relb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_relb erts_no_atomic_inc
-#define erts_smp_atomic_dec_relb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_smp_atomic_add_relb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_ddrb erts_no_atomic_set
-#define erts_smp_atomic_set_ddrb erts_no_atomic_set
-#define erts_smp_atomic_read_ddrb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_smp_atomic_add_ddrb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_rb erts_no_atomic_set
-#define erts_smp_atomic_set_rb erts_no_atomic_set
-#define erts_smp_atomic_read_rb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_rb erts_no_atomic_inc
-#define erts_smp_atomic_dec_rb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_smp_atomic_add_rb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_wb erts_no_atomic_set
-#define erts_smp_atomic_set_wb erts_no_atomic_set
-#define erts_smp_atomic_read_wb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_wb erts_no_atomic_inc
-#define erts_smp_atomic_dec_wb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_smp_atomic_add_wb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_set_dirty erts_no_atomic_set
-#define erts_smp_atomic_read_dirty erts_no_atomic_read
-
-/* 32-bit atomics */
-
-#define erts_smp_atomic32_init_nob erts_no_atomic32_set
-#define erts_smp_atomic32_set_nob erts_no_atomic32_set
-#define erts_smp_atomic32_read_nob erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_nob erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_mb erts_no_atomic32_set
-#define erts_smp_atomic32_set_mb erts_no_atomic32_set
-#define erts_smp_atomic32_read_mb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_mb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_read_acqb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_acqb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_relb erts_no_atomic32_set
-#define erts_smp_atomic32_set_relb erts_no_atomic32_set
-#define erts_smp_atomic32_read_relb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_relb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_rb erts_no_atomic32_set
-#define erts_smp_atomic32_set_rb erts_no_atomic32_set
-#define erts_smp_atomic32_read_rb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_rb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_wb erts_no_atomic32_set
-#define erts_smp_atomic32_set_wb erts_no_atomic32_set
-#define erts_smp_atomic32_read_wb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_wb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_set_dirty erts_no_atomic32_set
-#define erts_smp_atomic32_read_dirty erts_no_atomic32_read
-
-/* 64-bit atomics */
-
-#define erts_smp_atomic64_init_nob erts_no_atomic64_set
-#define erts_smp_atomic64_set_nob erts_no_atomic64_set
-#define erts_smp_atomic64_read_nob erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_nob erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_mb erts_no_atomic64_set
-#define erts_smp_atomic64_set_mb erts_no_atomic64_set
-#define erts_smp_atomic64_read_mb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_mb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_mb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_acqb erts_no_atomic64_set
-#define erts_smp_atomic64_set_acqb erts_no_atomic64_set
-#define erts_smp_atomic64_read_acqb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_acqb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_relb erts_no_atomic64_set
-#define erts_smp_atomic64_set_relb erts_no_atomic64_set
-#define erts_smp_atomic64_read_relb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_relb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set
-#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set
-#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_rb erts_no_atomic64_set
-#define erts_smp_atomic64_set_rb erts_no_atomic64_set
-#define erts_smp_atomic64_read_rb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_rb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_rb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_wb erts_no_atomic64_set
-#define erts_smp_atomic64_set_wb erts_no_atomic64_set
-#define erts_smp_atomic64_read_wb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_wb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_set_dirty erts_no_atomic64_set
-#define erts_smp_atomic64_read_dirty erts_no_atomic64_read
-
-#endif /* !ERTS_SMP */
-
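In a threads-disabled build the whole erts_smp_* atomic family above degenerates to the erts_no_* helpers: every memory-order suffix (_nob, _mb, _acqb, _relb, ...) maps to the same plain operation, since a single-threaded emulator needs no ordering guarantees. A minimal hedged sketch of the same idea, with hypothetical my_* names:

/* Hypothetical miniature of the mapping above: with no other
 * threads, every barrier variant is just the plain operation. */
typedef long my_no_atomic_t;

#define my_atomic_read_nob(VarP)      (*(VarP))
#define my_atomic_read_acqb(VarP)     (*(VarP)) /* acquire -> plain load  */
#define my_atomic_read_mb(VarP)       (*(VarP)) /* full barrier -> plain  */
#define my_atomic_set_relb(VarP, Val) (*(VarP) = (Val)) /* release -> set */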
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_smp_thr_init(erts_smp_thr_init_data_t *id)
-{
-#ifdef ERTS_SMP
- erts_thr_init(id);
-#endif
-}
-
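Every wrapper in this header follows the same shape as erts_smp_thr_init() above: forward to the real erts_thr_* primitive when ERTS_SMP is defined, otherwise compile to (almost) nothing. A hedged sketch of the pattern with hypothetical names (my_smp_unlock, real_unlock):

/* Hypothetical wrapper in the same style: a forwarding call in
 * SMP builds, a no-op in single-threaded builds. */
static void real_unlock(int *lock) { *lock = 0; }

ERTS_GLB_INLINE void
my_smp_unlock(int *lock)
{
#ifdef ERTS_SMP
    real_unlock(lock);
#else
    (void)lock;  /* nothing to synchronize with */
#endif
}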
-ERTS_GLB_INLINE void
-erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg,
- erts_smp_thr_opts_t *opts)
-{
-#ifdef ERTS_SMP
- erts_thr_create(tid, func, arg, opts);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res)
-{
-#ifdef ERTS_SMP
- erts_thr_join(tid, thr_res);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_thr_detach(erts_smp_tid_t tid)
-{
-#ifdef ERTS_SMP
- erts_thr_detach(tid);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_thr_exit(void *res)
-{
-#ifdef ERTS_SMP
- erts_thr_exit(res);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_install_exit_handler(void (*exit_handler)(void))
-{
-#ifdef ERTS_SMP
- erts_thr_install_exit_handler(exit_handler);
-#endif
-}
-
-ERTS_GLB_INLINE erts_smp_tid_t
-erts_smp_thr_self(void)
-{
-#ifdef ERTS_SMP
- return erts_thr_self();
-#else
- return 0;
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y)
-{
-#ifdef ERTS_SMP
- return erts_equal_tids(x, y);
-#else
- return 1;
-#endif
-}
-
-
-#ifdef ERTS_HAVE_REC_MTX_INIT
-ERTS_GLB_INLINE void
-erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_rec_mtx_init(mtx);
-#endif
-}
-#endif
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra, 1);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked_x(mtx, name, extra, 1);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_destroy(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
-#else
-erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_mtx_trylock_x(mtx,file,line);
-#elif defined(ERTS_SMP)
- return erts_mtx_trylock(mtx);
-#else
- return 0;
-#endif
-
-}
-
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
-#else
-erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_mtx_lock_x(mtx, file, line);
-#elif defined(ERTS_SMP)
- erts_mtx_lock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unlock(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_mtx_unlock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_mtx_is_locked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_init(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_init(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_destroy(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_destroy(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
- erts_cnd_wait(cnd, mtx);
-#endif
-}
-
-/*
- * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast()
- *
- * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
- * even when the associated mutex/mutexes is/are not locked by the
- * caller. Our implementation does not allow that, in order to avoid a
- * performance penalty. That is, all associated mutexes *need* to be
- * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()!
- */
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_signal(cnd);
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
- erts_cnd_broadcast(cnd);
-#endif
-}
-
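A hedged usage sketch of the contract spelled out in the note above; my_mtx, my_cnd, and my_condition_flag are hypothetical:

/* Hypothetical caller: the associated mutex must be held while
 * signalling, unlike with plain POSIX condition variables. */
erts_smp_mtx_lock(&my_mtx);
my_condition_flag = 1;
erts_smp_cnd_signal(&my_cnd);  /* mutex still locked: OK */
erts_smp_mtx_unlock(&my_mtx);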
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_set_reader_group(int no)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_set_reader_group(no);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_x(rwmtx, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt(rwmtx, opt, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init(rwmtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_destroy(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_rwmtx_tryrlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrlock(rwmtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_rwmtx_rlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- erts_rwmtx_rlock(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_runlock(rwmtx);
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrwlock(rwmtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
-#else
-erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_rwmtx_rwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- erts_rwmtx_rwlock(rwmtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_rwunlock(rwmtx);
-#endif
-}
-
-#if 0 /* The following rwmtx function names are
- reserved for potential future use. */
-
-/* Try upgrade from r-locked state to rw-locked state */
-ERTS_GLB_INLINE int
-erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx)
-{
- return 0;
-}
-
-/* Upgrade from r-locked state to rw-locked state */
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx)
-{
-
-}
-
-/* Downgrade from rw-locked state to r-locked state */
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx)
-{
-
-}
-
-#endif
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwmtx_is_rlocked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwmtx_is_rwlocked(mtx);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init(lock, name);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_spinlock_destroy(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_spin_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_spin_lock(erts_smp_spinlock_t *lock)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_spin_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_spin_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_spinlock_is_locked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init(lock, name);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_rwlock_destroy(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_read_unlock(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_read_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_read_lock(erts_smp_rwlock_t *lock)
-#endif
-{
-#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
- erts_read_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_read_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_write_unlock(erts_smp_rwlock_t *lock)
-{
-#ifdef ERTS_SMP
- erts_write_unlock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
-#else
-erts_smp_write_lock(erts_smp_rwlock_t *lock)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
- erts_write_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_write_lock(lock);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwlock_is_rlocked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- return erts_lc_rwlock_is_rwlocked(lock);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname)
-{
-#ifdef ERTS_SMP
- erts_tsd_key_create(keyp,keyname);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_key_delete(erts_smp_tsd_key_t key)
-{
-#ifdef ERTS_SMP
- erts_tsd_key_delete(key);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value)
-{
-#ifdef ERTS_SMP
- erts_tsd_set(key, value);
-#endif
-}
-
-ERTS_GLB_INLINE void *
-erts_smp_tsd_get(erts_smp_tsd_key_t key)
-{
-#ifdef ERTS_SMP
- return erts_tsd_get(key);
-#else
- return NULL;
-#endif
-}
-
-#ifdef ERTS_THR_HAVE_SIG_FUNCS
-#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
-
-ERTS_GLB_INLINE void
-erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
-{
-#ifdef ERTS_SMP
- erts_thr_sigmask(how, set, oset);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_thr_sigwait(const sigset_t *set, int *sig)
-{
-#ifdef ERTS_SMP
- erts_thr_sigwait(set, sig);
-#endif
-}
-
-#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-#endif /* ERL_SMP_H */
-
-#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
-
-/* Deprecated functions to be replaced */
-
-#undef erts_smp_atomic_init
-#undef erts_smp_atomic_set
-#undef erts_smp_atomic_read
-#undef erts_smp_atomic_inctest
-#undef erts_smp_atomic_dectest
-#undef erts_smp_atomic_inc
-#undef erts_smp_atomic_dec
-#undef erts_smp_atomic_addtest
-#undef erts_smp_atomic_add
-#undef erts_smp_atomic_xchg
-#undef erts_smp_atomic_cmpxchg
-#undef erts_smp_atomic_bor
-#undef erts_smp_atomic_band
-
-#undef erts_smp_atomic32_init
-#undef erts_smp_atomic32_set
-#undef erts_smp_atomic32_read
-#undef erts_smp_atomic32_inctest
-#undef erts_smp_atomic32_dectest
-#undef erts_smp_atomic32_inc
-#undef erts_smp_atomic32_dec
-#undef erts_smp_atomic32_addtest
-#undef erts_smp_atomic32_add
-#undef erts_smp_atomic32_xchg
-#undef erts_smp_atomic32_cmpxchg
-#undef erts_smp_atomic32_bor
-#undef erts_smp_atomic32_band
-
-#endif
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index 7d857ad326..d904e35e40 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -59,6 +59,42 @@ erts_set_literal_tag(Eterm *term, Eterm *hp_start, Eterm hsz)
#endif
}
+void
+erts_term_init(void)
+{
+#ifdef ERTS_ORDINARY_REF_MARKER
+ /* Ordinary and magic references of same size... */
+
+ ErtsRefThing ref_thing;
+
+ ERTS_CT_ASSERT(ERTS_ORDINARY_REF_MARKER == ~((Uint32)0));
+ ref_thing.m.header = ERTS_REF_THING_HEADER;
+ ref_thing.m.mb = (ErtsMagicBinary *) ~((UWord) 3);
+ ref_thing.m.next = (struct erl_off_heap_header *) ~((UWord) 3);
+ if (ref_thing.o.marker == ERTS_ORDINARY_REF_MARKER)
+ ERTS_INTERNAL_ERROR("Cannot differentiate between magic and ordinary references");
+
+ ERTS_CT_ASSERT(offsetof(ErtsORefThing,marker) != 0);
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) == sizeof(ErtsMRefThing));
+# ifdef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should not have been defined...
+# endif
+
+#else
+ /* Ordinary and magic references of different sizes... */
+
+# ifndef ERTS_MAGIC_REF_THING_HEADER
+# error Magic ref thing header should have been defined...
+# endif
+ ERTS_CT_ASSERT(sizeof(ErtsORefThing) != sizeof(ErtsMRefThing));
+
+#endif
+
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsORefThing));
+ ERTS_CT_ASSERT(ERTS_MAGIC_REF_THING_SIZE*sizeof(Eterm) == sizeof(ErtsMRefThing));
+
+}
+
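erts_term_init() above leans heavily on compile-time assertions to validate the reference layouts. A hedged reconstruction of how such a macro can work (MY_CT_ASSERT is hypothetical, not the actual ERTS_CT_ASSERT definition):

/* Hypothetical static assert: a false condition yields a
 * negative array size and therefore a compile error. */
#define MY_CT_ASSERT(Expr) \
    typedef char my_ct_assert_type[(Expr) ? 1 : -1]

MY_CT_ASSERT(sizeof(Uint32) == 4);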
/*
* XXX: define NUMBER_CODE() here when new representation is used
*/
@@ -95,8 +131,8 @@ ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
-ET_DEFINE_CHECKED(Uint32*,internal_ref_data,Wterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm,is_internal_magic_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm,is_internal_ordinary_ref);
ET_DEFINE_CHECKED(struct erl_node_*,internal_ref_node,Eterm,is_internal_ref);
ET_DEFINE_CHECKED(Eterm*,external_val,Wterm,is_external);
ET_DEFINE_CHECKED(Uint,external_data_words,Wterm,is_external);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index c3234ee349..bddf403b0a 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,8 @@
#include "erl_mmap.h"
+void erts_term_init(void);
+
typedef UWord Wterm; /* Full word terms */
struct erl_node_; /* Declared in erl_node_tables.h */
@@ -53,9 +55,6 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#if defined(ARCH_64)
# define TAG_PTR_MASK__ 0x7
# if !defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
-# ifdef HIPE
-# error Hipe on 64-bit needs a real mmap as it does not support the literal tag
-# endif
# define TAG_LITERAL_PTR 0x4
# else
# undef TAG_LITERAL_PTR
@@ -268,7 +267,6 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Wterm)
#define is_byte(x) (((x) & ((~(Uint)0 << (_TAG_IMMED1_SIZE+8)) + _TAG_IMMED1_MASK)) == _TAG_IMMED1_SMALL)
#define is_valid_bit_size(x) (((Sint)(x)) >= 0 && ((x) & 0x7F) == _TAG_IMMED1_SMALL)
#define is_not_valid_bit_size(x) (!is_valid_bit_size((x)))
-#define MY_IS_SSMALL(x) (((Uint) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
#define _unchecked_unsigned_val(x) ((x) >> _TAG_IMMED1_SIZE)
_ET_DECLARE_CHECKED(Uint,unsigned_val,Eterm)
#define unsigned_val(x) _ET_APPLY(unsigned_val,(x))
@@ -315,7 +313,8 @@ _ET_DECLARE_CHECKED(Uint,header_arity,Eterm)
#define MAX_ARITYVAL ((((Uint)1) << 24) - 1)
#define ERTS_MAX_TUPLE_SIZE MAX_ARITYVAL
-#define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL)
+#define make_arityval(sz) (ASSERT((sz) <= MAX_ARITYVAL), \
+ _make_header((sz),_TAG_HEADER_ARITYVAL))
#define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL)
#define is_sane_arity_value(x) ((((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) && \
(((x) >> _HEADER_ARITY_OFFS) <= MAX_ARITYVAL))
@@ -346,6 +345,9 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
*
* To help find code which makes unwarranted assumptions about zero,
* we now use a non-zero bit-pattern in debug mode.
+ *
+ * In order to be able to differentiate it from values, the non-value
+ * needs to be tagged as a header of some sort.
*/
#if ET_DEBUG
# ifdef HIPE
@@ -356,7 +358,7 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
# define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
# endif
#else
-#define THE_NON_VALUE (0)
+#define THE_NON_VALUE (TAG_PRIMARY_HEADER)
#endif
#define is_non_value(x) ((x) == THE_NON_VALUE)
#define is_value(x) ((x) != THE_NON_VALUE)
@@ -718,73 +720,235 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define ERTS_MAX_REF_NUMBERS 3
#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
-#if defined(ARCH_64)
-# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
-# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
-#else
-# define ERTS_REF_WORDS ERTS_REF_NUMBERS
-# define ERTS_REF_32BIT_WORDS ERTS_REF_NUMBERS
+#ifndef ERTS_ENDIANNESS
+# error ERTS_ENDIANNESS not defined...
#endif
-typedef struct {
- Eterm header;
- union {
- Uint32 ui32[ERTS_REF_32BIT_WORDS];
- Uint ui[ERTS_REF_WORDS];
- } data;
-} RefThing;
-
-#define REF_THING_SIZE (sizeof(RefThing)/sizeof(Uint))
-#define REF_THING_HEAD_SIZE (sizeof(Eterm)/sizeof(Uint))
+#if ERTS_REF_NUMBERS != 3
+# error "A new reference layout for 64-bit needs to be implemented..."
+#endif
-#define make_ref_thing_header(DW) \
- _make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
+struct magic_binary;
#if defined(ARCH_64)
+# define ERTS_ORDINARY_REF_MARKER (~((Uint32) 0))
+
+typedef struct {
+ Eterm header;
+#if ERTS_ENDIANNESS <= 0
+ Uint32 marker;
+#endif
+ Uint32 num[ERTS_REF_NUMBERS];
+#if ERTS_ENDIANNESS > 0
+ Uint32 marker;
+#endif
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+#if !ERTS_ENDIANNESS
+ Uint32 num[ERTS_REF_NUMBERS];
+ Uint32 marker;
+#endif
+} ErtsMRefThing;
+
/*
- * Ref layout on a 64-bit little endian machine:
+ * Ordinary ref layout on a 64-bit little endian machine:
*
* 63 31 0
* +--------------+--------------+
* | Thing word |
* +--------------+--------------+
- * | Data 0 | 32-bit arity |
+ * | Data 0 | 0xffffffff |
* +--------------+--------------+
* | Data 2 | Data 1 |
* +--------------+--------------+
*
- * Data is stored as a Uint32 array with the 32-bit arity as the first number.
+ * Ordinary ref layout on a 64-bit big endian machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Data 0 | Data 1 |
+ * +--------------+--------------+
+ * | Data 2 | 0xffffffff |
+ * +--------------+--------------+
+ *
+ * Magic Ref layout on a 64-bit machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Magic Binary Pointer |
+ * +--------------+--------------+
+ * | Next Off Heap Pointer |
+ * +--------------+--------------+
+ *
+ * Both pointers in the magic ref are 64-bit aligned, i.e. their
+ * least significant bits are zero. The 32-bit marker word is
+ * placed over the least significant bits of one of the pointers,
+ * so we can distinguish between magic and ordinary refs by
+ * looking at the marker field.
+ *
*/
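A minimal sketch of the distinction the layouts above make possible, assuming the 64-bit, known-endianness configuration where both ref flavours have the same size (my_ref_is_ordinary is hypothetical; the real predicates appear below as is_ordinary_ref_thing/is_magic_ref_thing):

/* Sketch: both pointers in a magic ref are 8-byte aligned, so
 * the 32 bits that overlay a pointer can never be all ones,
 * while an ordinary ref stores 0xffffffff in its marker. */
int my_ref_is_ordinary(ErtsRefThing *rt)
{
    return rt->o.marker == ERTS_ORDINARY_REF_MARKER;
}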
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = ERTS_REF_NUMBERS; \
- ((RefThing *) (Hp))->data.ui32[1] = (R0); \
- ((RefThing *) (Hp))->data.ui32[2] = (R1); \
- ((RefThing *) (Hp))->data.ui32[3] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->marker = ERTS_ORDINARY_REF_MARKER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
} while (0)
-#else
+#if ERTS_ENDIANNESS
+/* Known big or little endian */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#else /* !ERTS_ENDIANNESS */
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ((ErtsMRefThing *) (Hp))->marker = 0; \
+ ((ErtsMRefThing *) (Hp))->num[0] = (Binp)->refn[0]; \
+ ((ErtsMRefThing *) (Hp))->num[1] = (Binp)->refn[1]; \
+ ((ErtsMRefThing *) (Hp))->num[2] = (Binp)->refn[2]; \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
+} while (0)
+
+#endif /* !ERTS_ENDIANNESS */
+
+#else /* ARCH_32 */
+
+typedef struct {
+ Eterm header;
+ Uint32 num[ERTS_REF_NUMBERS];
+} ErtsORefThing;
+
+typedef struct {
+ Eterm header;
+ struct magic_binary *mb;
+ struct erl_off_heap_header* next;
+} ErtsMRefThing;
+
#define write_ref_thing(Hp, R0, R1, R2) \
do { \
- ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
- ((RefThing *) (Hp))->data.ui32[0] = (R0); \
- ((RefThing *) (Hp))->data.ui32[1] = (R1); \
- ((RefThing *) (Hp))->data.ui32[2] = (R2); \
+ ((ErtsORefThing *) (Hp))->header = ERTS_REF_THING_HEADER; \
+ ((ErtsORefThing *) (Hp))->num[0] = (R0); \
+ ((ErtsORefThing *) (Hp))->num[1] = (R1); \
+ ((ErtsORefThing *) (Hp))->num[2] = (R2); \
+} while (0)
+
+#define write_magic_ref_thing(Hp, Ohp, Binp) \
+do { \
+ ((ErtsMRefThing *) (Hp))->header = ERTS_MAGIC_REF_THING_HEADER; \
+ ((ErtsMRefThing *) (Hp))->mb = (Binp); \
+ ((ErtsMRefThing *) (Hp))->next = (Ohp)->first; \
+ (Ohp)->first = (struct erl_off_heap_header*) (Hp); \
+ ASSERT(erts_is_ref_numbers_magic((Binp)->refn)); \
} while (0)
+#endif /* ARCH_32 */
+
+typedef union {
+ ErtsMRefThing m;
+ ErtsORefThing o;
+} ErtsRefThing;
+
+/* for copy sharing */
+#define BOXED_VISITED_MASK ((Eterm) 3)
+#define BOXED_VISITED ((Eterm) 1)
+#define BOXED_SHARED_UNPROCESSED ((Eterm) 2)
+#define BOXED_SHARED_PROCESSED ((Eterm) 3)
+
+#define ERTS_REF_THING_SIZE (sizeof(ErtsORefThing)/sizeof(Uint))
+#define ERTS_MAGIC_REF_THING_SIZE (sizeof(ErtsMRefThing)/sizeof(Uint))
+#define ERTS_MAX_INTERNAL_REF_SIZE (sizeof(ErtsRefThing)/sizeof(Uint))
+
+#define make_ref_thing_header(Words) \
+ _make_header((Words)-1,_TAG_HEADER_REF)
+
+#define ERTS_REF_THING_HEADER _make_header(ERTS_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+#if defined(ARCH_64) && ERTS_ENDIANNESS /* All internal refs of same size... */
+
+# undef ERTS_MAGIC_REF_THING_HEADER
+
+# define is_ref_thing_header(x) ((x) == ERTS_REF_THING_HEADER)
+
+#ifdef SHCOPY
+#define is_ordinary_ref_thing(x) \
+ (((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#else
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header((*((Eterm *)(x))) & ~BOXED_VISITED_MASK)), \
+ ((ErtsRefThing *) (x))->o.marker == ERTS_ORDINARY_REF_MARKER)
+#endif
+
+#define is_magic_ref_thing(x) \
+ (!is_ordinary_ref_thing((x)))
+
+#define is_internal_magic_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_magic_ref_thing(boxed_val((x))))
+
+#define is_internal_ordinary_ref(x) \
+ ((_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER) \
+ && is_ordinary_ref_thing(boxed_val((x))))
+
+#else /* Ordinary and magic references of different sizes... */
+
+# define ERTS_MAGIC_REF_THING_HEADER \
+ _make_header(ERTS_MAGIC_REF_THING_SIZE-1,_TAG_HEADER_REF)
+
+# define is_ref_thing_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
+
+#define is_ordinary_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_REF_THING_HEADER)
+
+#define is_magic_ref_thing(x) \
+ (ASSERT(is_ref_thing_header(*((Eterm *)(x)))), \
+ *((Eterm *)(x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_magic_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_MAGIC_REF_THING_HEADER)
+
+#define is_internal_ordinary_ref(x) \
+ (_unchecked_is_boxed((x)) && *boxed_val((x)) == ERTS_REF_THING_HEADER)
+
#endif
-#define is_ref_thing_header(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
#define make_internal_ref(x) make_boxed((Eterm*)(x))
-#define _unchecked_ref_thing_ptr(x) \
- ((RefThing*) _unchecked_internal_ref_val(x))
-#define ref_thing_ptr(x) \
- ((RefThing*) internal_ref_val(x))
+#define _unchecked_ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) _unchecked_internal_ref_val(x))
+#define ordinary_ref_thing_ptr(x) \
+ ((ErtsORefThing*) internal_ref_val(x))
+
+#define _unchecked_magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) _unchecked_internal_ref_val(x))
+#define magic_ref_thing_ptr(x) \
+ ((ErtsMRefThing*) internal_ref_val(x))
#define is_internal_ref(x) \
(_unchecked_is_boxed((x)) && is_ref_thing_header(*boxed_val((x))))
@@ -796,16 +960,21 @@ do { \
_ET_DECLARE_CHECKED(Eterm*,internal_ref_val,Wterm)
#define internal_ref_val(x) _ET_APPLY(internal_ref_val,(x))
-#define internal_thing_ref_data_words(t) (thing_arityval(*(Eterm*)(t)))
-#define _unchecked_internal_ref_data_words(x) \
- (_unchecked_thing_arityval(*_unchecked_internal_ref_val(x)))
-_ET_DECLARE_CHECKED(Uint,internal_ref_data_words,Wterm)
-#define internal_ref_data_words(x) _ET_APPLY(internal_ref_data_words,(x))
+#define internal_ordinary_thing_ref_numbers(ort) (((ErtsORefThing *)(ort))->num)
+#define _unchecked_internal_ordinary_ref_numbers(x) (internal_ordinary_thing_ref_numbers(_unchecked_ordinary_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_ordinary_ref_numbers,Wterm)
+#define internal_ordinary_ref_numbers(x) _ET_APPLY(internal_ordinary_ref_numbers,(x))
+
+#if defined(ARCH_64) && !ERTS_ENDIANNESS
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->num)
+#else
+#define internal_magic_thing_ref_numbers(mrt) (((ErtsMRefThing *)(mrt))->mb->refn)
+#endif
+
+#define _unchecked_internal_magic_ref_numbers(x) (internal_magic_thing_ref_numbers(_unchecked_magic_ref_thing_ptr(x)))
+_ET_DECLARE_CHECKED(Uint32*,internal_magic_ref_numbers,Wterm)
+#define internal_magic_ref_numbers(x) _ET_APPLY(internal_magic_ref_numbers,(x))
-#define internal_thing_ref_data(thing) ((thing)->data.ui32)
-#define _unchecked_internal_ref_data(x) (internal_thing_ref_data(_unchecked_ref_thing_ptr(x)))
-_ET_DECLARE_CHECKED(Uint32*,internal_ref_data,Wterm)
-#define internal_ref_data(x) _ET_APPLY(internal_ref_data,(x))
#define _unchecked_internal_ref_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm)
@@ -1015,6 +1184,54 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val(x)))
#define is_not_map(x) (!is_map(x))
+#define MAP_HEADER(hp, sz, keys) \
+ ((hp)[0] = MAP_HEADER_FLATMAP, \
+ (hp)[1] = sz, \
+ (hp)[2] = keys)
+
+#define MAP_SZ(sz) (MAP_HEADER_FLATMAP_SZ + 2*sz + 1)
+
+#define MAP0_SZ MAP_SZ(0)
+#define MAP1_SZ MAP_SZ(1)
+#define MAP2_SZ MAP_SZ(2)
+#define MAP3_SZ MAP_SZ(3)
+#define MAP4_SZ MAP_SZ(4)
+#define MAP5_SZ MAP_SZ(5)
+#define MAP0(hp) \
+ (MAP_HEADER(hp, 0, TUPLE0(hp+MAP_HEADER_FLATMAP_SZ)), \
+ make_flatmap(hp))
+#define MAP1(hp, k1, v1) \
+ (MAP_HEADER(hp, 1, TUPLE1(hp+1+MAP_HEADER_FLATMAP_SZ, k1)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ make_flatmap(hp))
+#define MAP2(hp, k1, v1, k2, v2) \
+ (MAP_HEADER(hp, 2, TUPLE2(hp+2+MAP_HEADER_FLATMAP_SZ, k1, k2)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ make_flatmap(hp))
+#define MAP3(hp, k1, v1, k2, v2, k3, v3) \
+ (MAP_HEADER(hp, 3, TUPLE3(hp+3+MAP_HEADER_FLATMAP_SZ, k1, k2, k3)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ make_flatmap(hp))
+#define MAP4(hp, k1, v1, k2, v2, k3, v3, k4, v4) \
+ (MAP_HEADER(hp, 4, TUPLE4(hp+4+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ make_flatmap(hp))
+#define MAP5(hp, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5) \
+ (MAP_HEADER(hp, 5, TUPLE5(hp+5+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4, k5)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+4] = v5, \
+ make_flatmap(hp))
+
+
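A hedged usage sketch for the flat-map builders above; p (a Process pointer), key1, and key2 are assumptions, and the keys are assumed to already be in term order, since the MAPn macros write them verbatim:

/* Hypothetical: build a two-key flat map on the process heap. */
Eterm *hp = HAlloc(p, MAP2_SZ);     /* MAP2_SZ words are enough */
Eterm map = MAP2(hp, key1, make_small(1),
                     key2, make_small(2));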
/* number tests */
#define is_integer(x) (is_small(x) || is_big(x))
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 700ed90def..bac437efe9 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -80,7 +80,6 @@
#include "erl_thr_progress.h"
#include "global.h"
-#ifdef ERTS_SMP
#define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0
@@ -321,13 +320,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+ /*
+	 * We only allocate the part up to the wakeup_request field, which is
+	 * the first field used only by registered threads.
+ */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
@@ -337,8 +346,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
@@ -494,6 +508,10 @@ init_wakeup_request_array(ErtsThrPrgrVal *w)
}
}
+ErtsThrPrgrData *erts_thr_progress_data(void) {
+ return erts_tsd_get(erts_thr_prgr_data_key__);
+}
+
void
erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
{
@@ -537,7 +555,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
}
-void
+ErtsThrPrgrData *
erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
ErtsThrPrgrCallbacks *callbacks,
int pref_wakeup)
@@ -616,6 +634,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
wakeup_managed(id);
}
callbacks->finalize_wait(callbacks->arg);
+ return tpd;
}
static ERTS_INLINE int
@@ -782,7 +801,7 @@ leader_update(ErtsThrPrgrData *tpd)
== ERTS_THR_PRGR_LFLG_NO_LEADER))
&& got_sched_wakeups()) {
 /* Someone needs to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
}
@@ -835,23 +854,22 @@ update(ErtsThrPrgrData *tpd)
}
int
-erts_thr_progress_update(ErtsSchedulerData *esdp)
+erts_thr_progress_update(ErtsThrPrgrData *tpd)
{
- return update(thr_prgr_data(esdp));
+ return update(tpd);
}
int
-erts_thr_progress_leader_update(ErtsSchedulerData *esdp)
+erts_thr_progress_leader_update(ErtsThrPrgrData *tpd)
{
- return leader_update(thr_prgr_data(esdp));
+ return leader_update(tpd);
}
void
-erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_prepare_wait(ErtsThrPrgrData *tpd)
{
erts_aint32_t lflgs;
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -870,14 +888,13 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
== ERTS_THR_PRGR_LFLG_NO_LEADER
&& got_sched_wakeups()) {
 /* Someone needs to make progress */
- wakeup_managed(0);
+ wakeup_managed(tpd->id);
}
}
void
-erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
+erts_thr_progress_finalize_wait(ErtsThrPrgrData *tpd)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
ErtsThrPrgrVal current, val;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -907,9 +924,8 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
}
void
-erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
+erts_thr_progress_active(ErtsThrPrgrData *tpd, int on)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
@@ -959,7 +975,7 @@ unmanaged_continue(ErtsThrPrgrDelayHandle handle)
== (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
&& got_sched_wakeups()) {
/* Others waiting for us... */
- wakeup_managed(0);
+ wakeup_managed(1);
}
}
}
@@ -1168,10 +1184,10 @@ request_wakeup_unmanaged(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
}
void
-erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+erts_thr_progress_wakeup(ErtsThrPrgrData *tpd,
ErtsThrPrgrVal value)
{
- ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
ASSERT(!tpd->is_temporary);
if (tpd->is_managed)
request_wakeup_managed(tpd, value);
@@ -1498,4 +1514,3 @@ void erts_thr_progress_dbg_print_state(void)
}
-#endif
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index b2894ba1fe..00a9e61407 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,23 +28,11 @@
* Author: Rickard Green
*/
-#if !defined(ERL_THR_PROGRESS_H__TSD_TYPE__)
+#ifndef ERL_THR_PROGRESS_H__TSD_TYPE__
#define ERL_THR_PROGRESS_H__TSD_TYPE__
#include "sys.h"
-#ifndef ERTS_SMP
-
-#define erts_smp_thr_progress_block() ((void) 0)
-#define erts_smp_thr_progress_unblock() ((void) 0)
-#define erts_smp_thr_progress_is_blocking() 1
-
-#else /* ERTS_SMP */
-
-#define erts_smp_thr_progress_block erts_thr_progress_block
-#define erts_smp_thr_progress_unblock erts_thr_progress_unblock
-#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking
-
void erts_thr_progress_block(void);
void erts_thr_progress_unblock(void);
int erts_thr_progress_is_blocking(void);
@@ -87,13 +75,10 @@ typedef struct {
int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp);
void erts_thr_progress_fatal_error_wait(SWord timeout);
-#endif /* ERTS_SMP */
typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp;
struct ErtsThrPrgrLaterOp_ {
-#ifdef ERTS_SMP
ErtsThrPrgrVal later;
-#endif
void (*func)(void *);
void *data;
ErtsThrPrgrLaterOp *next;
@@ -107,7 +92,6 @@ struct ErtsThrPrgrLaterOp_ {
#include "erl_threads.h"
#include "erl_process.h"
-#ifdef ERTS_SMP
/* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */
#define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0)
@@ -139,22 +123,24 @@ extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
-void erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
- ErtsThrPrgrCallbacks *,
- int);
+ErtsThrPrgrData *erts_thr_progress_register_managed_thread(
+ ErtsSchedulerData *esdp, ErtsThrPrgrCallbacks *, int);
void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
-void erts_thr_progress_active(ErtsSchedulerData *esdp, int on);
-void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+void erts_thr_progress_active(ErtsThrPrgrData *, int on);
+void erts_thr_progress_wakeup(ErtsThrPrgrData *,
ErtsThrPrgrVal value);
-int erts_thr_progress_update(ErtsSchedulerData *esdp);
-int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
-void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
-void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+int erts_thr_progress_update(ErtsThrPrgrData *);
+int erts_thr_progress_leader_update(ErtsThrPrgrData *);
+void erts_thr_progress_prepare_wait(ErtsThrPrgrData *);
+void erts_thr_progress_finalize_wait(ErtsThrPrgrData *);
ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void);
void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
+ErtsThrPrgrData *erts_thr_progress_data(void);
void erts_thr_progress_dbg_print_state(void);
+ERTS_GLB_INLINE ErtsThrPrgrData *erts_thr_prgr_data(ErtsSchedulerData *esdp);
+
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
@@ -177,6 +163,15 @@ ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE ErtsThrPrgrData *
+erts_thr_prgr_data(ErtsSchedulerData *esdp) {
+ if (esdp) {
+ return &esdp->thr_progress_data;
+ } else {
+ return erts_thr_progress_data();
+ }
+}
+
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
{
@@ -324,6 +319,5 @@ erts_thr_progress_has_reached(ErtsThrPrgrVal val)
#endif
-#endif /* ERTS_SMP */
#endif
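/*
 * A minimal sketch (not part of this diff) of the reworked
 * thread-progress API: registration now hands back an
 * ErtsThrPrgrData *, and erts_thr_prgr_data() resolves it from the
 * scheduler data (or from thread-specific data for non-scheduler
 * managed threads).  The loop body is hypothetical; the
 * erts_thr_progress_* calls follow the prototypes above.
 */
static void
managed_thread_loop(ErtsSchedulerData *esdp)
{
    ErtsThrPrgrData *tpd = erts_thr_prgr_data(esdp);

    for (;;) {
        /* ... perform work (hypothetical) ... */

        if (erts_thr_progress_update(tpd))
            erts_thr_progress_leader_update(tpd);

        /* Announce that we are about to block, and confirm when we
         * wake up again, so other threads do not wait on us. */
        erts_thr_progress_prepare_wait(tpd);
        /* ... block (hypothetical) ... */
        erts_thr_progress_finalize_wait(tpd);
    }
}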
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
index f56d0828dd..aab7c199d2 100644
--- a/erts/emulator/beam/erl_thr_queue.c
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -87,32 +87,10 @@
#define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50
-#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element,
ErtsThrQElement_t,
1000,
ERTS_ALC_T_THR_Q_EL_SL)
-#else
-
-static void
-init_sl_element_alloc(void)
-{
-}
-
-static ErtsThrQElement_t *
-sl_element_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL,
- sizeof(ErtsThrQElement_t));
-}
-
-static void
-sl_element_free(ErtsThrQElement_t *p)
-{
- erts_free(ERTS_ALC_T_THR_Q_EL_SL, p);
-}
-
-#endif
#define ErtsThrQDirtyReadEl(A) \
((ErtsThrQElement_t *) erts_atomic_read_dirty((A)))
@@ -135,14 +113,6 @@ static void noop_callback(void *arg) { }
void
erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
{
-#ifndef USE_THREADS
- q->init = *qi;
- if (!q->init.notify)
- q->init.notify = noop_callback;
- q->first = NULL;
- q->last = NULL;
- q->q.blk = NULL;
-#else
erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL);
q->tail.data.marker.data.ptr = NULL;
erts_atomic_init_nob(&q->tail.data.last,
@@ -164,10 +134,8 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_current();
q->head.next.thr_progress_reached = 1;
-#endif
q->head.next.um_refc_ix = 1;
q->head.next.unref_end = &q->tail.data.marker;
q->head.used_marker = 1;
@@ -176,15 +144,12 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->q.finalizing = 0;
q->q.live = qi->live.queue;
q->q.blk = NULL;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_finalize(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
q->q.finalizing = 1;
-#endif
while (erts_thr_q_dequeue(q));
return erts_thr_q_clean(q);
}
@@ -229,7 +194,6 @@ erts_thr_q_destroy(ErtsThrQ_t *q)
return erts_thr_q_finalize(q);
}
-#ifdef USE_THREADS
static void
destroy(ErtsThrQ_t *q)
@@ -249,7 +213,6 @@ destroy(ErtsThrQ_t *q)
erts_free(atype, q->q.blk);
}
-#endif
static ERTS_INLINE ErtsThrQElement_t *
element_live_alloc(ErtsThrQLive_t live)
@@ -267,11 +230,7 @@ static ERTS_INLINE ErtsThrQElement_t *
element_alloc(ErtsThrQ_t *q)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->tail.data.live;
-#else
- live = q->init.live.objects;
-#endif
return element_live_alloc(live);
}
@@ -291,15 +250,10 @@ static ERTS_INLINE void
element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->head.live;
-#else
- live = q->init.live.objects;
-#endif
element_live_free(live, el);
}
-#ifdef USE_THREADS
static ERTS_INLINE ErtsThrQElement_t *
enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this)
@@ -423,11 +377,9 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
return ERTS_THR_Q_CLEAN;
}
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached
|| erts_thr_progress_has_reached(q->head.next.thr_progress)) {
q->head.next.thr_progress_reached = 1;
-#endif
um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
/* Move unreferenced end pointer forward... */
@@ -439,23 +391,17 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
ilast = (erts_aint_t) enqueue_marker(q, NULL);
if (q->head.unref_end == (ErtsThrQElement_t *) ilast)
- ERTS_SMP_MEMORY_BARRIER;
+ ERTS_THR_MEMORY_BARRIER;
else {
q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_later(NULL);
-#endif
erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
um_refc_ix);
q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
-#ifdef ERTS_SMP
q->head.next.thr_progress_reached = 0;
-#endif
}
}
-#ifdef ERTS_SMP
}
-#endif
head = ErtsThrQDirtyReadEl(&q->head.head);
if (q->head.first == head) {
@@ -489,9 +435,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
check_thr_progress:
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
@@ -505,24 +449,16 @@ check_thr_progress:
return ERTS_THR_Q_NEED_THR_PRGR;
}
-#endif
ErtsThrQCleanState_t
erts_thr_q_clean(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0);
-#else
- return ERTS_THR_Q_CLEAN;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
{
-#ifndef USE_THREADS
- return ERTS_THR_Q_CLEAN;
-#else
ErtsThrQElement_t *head = ErtsThrQDirtyReadEl(&q->head.head);
if (ensure_empty) {
erts_aint_t inext;
@@ -553,39 +489,21 @@ erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
if (q->head.first != q->head.unref_end)
return ERTS_THR_Q_DIRTY;
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0)
return ERTS_THR_Q_DIRTY;
}
return ERTS_THR_Q_NEED_THR_PRGR;
-#endif
}
static void
enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
{
-#ifndef USE_THREADS
- ASSERT(data);
-
- this->next = NULL;
- this->data.ptr = data;
-
- if (q->last)
- q->last->next = this;
- else {
- q->first = q->last = this;
- q->init.notify(q->init.arg);
- }
-#else
int notify;
int um_refc_ix = 0;
-#ifdef ERTS_SMP
int unmanaged_thread;
-#endif
#if ERTS_THR_Q_DBG_CHK_DATA
if (!data)
@@ -596,10 +514,8 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
this->data.ptr = data;
-#ifdef ERTS_SMP
unmanaged_thread = !erts_thr_progress_is_managed_thread();
if (unmanaged_thread)
-#endif
{
um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
while (1) {
@@ -616,9 +532,7 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
notify = this == enqueue_managed(q, this);
-#ifdef ERTS_SMP
if (unmanaged_thread)
-#endif
{
if (notify)
erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
@@ -627,7 +541,6 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
}
if (notify)
q->tail.data.notify(q->tail.data.arg);
-#endif
}
void
@@ -645,9 +558,6 @@ erts_thr_q_prepare_enqueue(ErtsThrQ_t *q)
int
erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
{
-#ifndef USE_THREADS
- return 0;
-#else
#ifdef DEBUG
if (!q->head.deq_fini.start) {
ASSERT(!q->head.deq_fini.end);
@@ -670,14 +580,12 @@ erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
return fdp->start != NULL;
-#endif
}
void
erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
ErtsThrQFinDeQ_t *fdp1)
{
-#ifdef USE_THREADS
if (fdp1->start) {
if (fdp0->end)
ErtsThrQDirtySetEl(&fdp0->end->next, fdp1->start);
@@ -685,13 +593,11 @@ erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
fdp0->start = fdp1->start;
fdp0->end = fdp1->end;
}
-#endif
}
int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
ErtsThrQElement_t *start = state->start;
if (start) {
ErtsThrQLive_t live;
@@ -710,17 +616,14 @@ int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
return 1; /* More to do */
state->end = NULL;
}
-#endif
return 0;
}
void
erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
state->start = NULL;
state->end = NULL;
-#endif
}
@@ -734,22 +637,6 @@ erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
void *
erts_thr_q_dequeue(ErtsThrQ_t *q)
{
-#ifndef USE_THREADS
- void *res;
- ErtsThrQElement_t *tmp;
-
- if (!q->first)
- return NULL;
- tmp = q->first;
- res = tmp->data.ptr;
- q->first = tmp->next;
- if (!q->first)
- q->last = NULL;
-
- element_free(q, tmp);
-
- return res;
-#else
ErtsThrQElement_t *head;
erts_aint_t inext;
void *res;
@@ -778,7 +665,6 @@ erts_thr_q_dequeue(ErtsThrQ_t *q)
? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
: ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
return res;
-#endif
}
#ifdef USE_LTTNG_VM_TRACEPOINTS
@@ -786,14 +672,6 @@ int
erts_thr_q_length_dirty(ErtsThrQ_t *q)
{
int n = 0;
-#ifndef USE_THREADS
- void *res;
- ErtsThrQElement_t *tmp;
-
- for (tmp = q->first; tmp != NULL; tmp = tmp->next) {
- n++;
- }
-#else
ErtsThrQElement_t *e;
erts_aint_t inext;
@@ -808,7 +686,6 @@ erts_thr_q_length_dirty(ErtsThrQ_t *q)
}
inext = erts_atomic_read_acqb(&e->next);
}
-#endif
return n;
}
#endif
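/*
 * A minimal sketch (not part of this diff) of basic use of the queue
 * now that the single-threaded fallback is gone.  handle() is
 * hypothetical; the erts_thr_q_* calls match the declarations in
 * erl_thr_queue.h below.
 */
static ErtsThrQ_t my_q;

static void
producer(void *msg)
{
    /* May be called from any thread; fires the notify callback when
     * the queue goes from empty to non-empty. */
    erts_thr_q_enqueue(&my_q, msg);
}

static void
consumer(void)
{
    void *msg;

    while ((msg = erts_thr_q_dequeue(&my_q)))
        handle(msg);                        /* hypothetical */

    /* Freeing dequeued elements may have to wait for thread
     * progress; see the return value of erts_thr_q_clean(). */
    switch (erts_thr_q_clean(&my_q)) {
    case ERTS_THR_Q_CLEAN:         break;
    case ERTS_THR_Q_DIRTY:         /* retry the clean later */ break;
    case ERTS_THR_Q_NEED_THR_PRGR: /* wait for thread progress */ break;
    }
}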
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
index 705a67af4c..29b58063ac 100644
--- a/erts/emulator/beam/erl_thr_queue.h
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -78,11 +78,7 @@ typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;
struct ErtsThrQElement_t_ {
-#ifdef USE_THREADS
erts_atomic_t next;
-#else
- ErtsThrQElement_t *next;
-#endif
union {
erts_atomic_t atmc;
void *ptr;
@@ -100,7 +96,6 @@ typedef enum {
ERTS_THR_Q_DIRTY,
} ErtsThrQCleanState_t;
-#ifdef USE_THREADS
typedef struct {
ErtsThrQElement_t marker;
@@ -108,9 +103,7 @@ typedef struct {
erts_atomic_t um_refc[2];
erts_atomic32_t um_refc_ix;
ErtsThrQLive_t live;
-#ifdef ERTS_SMP
erts_atomic32_t thr_prgr_clean_scheduled;
-#endif
void *arg;
void (*notify)(void *);
} ErtsThrQTail_t;
@@ -141,10 +134,8 @@ struct ErtsThrQ_t_ {
ErtsThrQElement_t *end;
} deq_fini;
struct {
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_progress;
int thr_progress_reached;
-#endif
int um_refc_ix;
ErtsThrQElement_t *unref_end;
} next;
@@ -159,18 +150,6 @@ struct ErtsThrQ_t_ {
} q;
};
-#else /* !USE_THREADS */
-
-struct ErtsThrQ_t_ {
- ErtsThrQInit_t init;
- ErtsThrQElement_t *first;
- ErtsThrQElement_t *last;
- struct {
- void *blk;
- } q;
-};
-
-#endif
void erts_thr_q_init(void);
void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
@@ -194,19 +173,15 @@ void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
int erts_thr_q_length_dirty(ErtsThrQ_t *);
#endif
-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
-#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
{
return q->head.next.thr_progress;
}
-#endif
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
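/*
 * A minimal sketch (not part of this diff) of acting on
 * ERTS_THR_Q_NEED_THR_PRGR: erts_thr_q_need_thr_progress() (the
 * inline above) tells the caller which thread-progress value must be
 * reached before cleaning can complete.  schedule_clean_later() is
 * hypothetical.
 */
static void
maybe_retry_clean(ErtsThrQ_t *q)
{
    if (erts_thr_q_clean(q) == ERTS_THR_Q_NEED_THR_PRGR) {
        ErtsThrPrgrVal wakeup = erts_thr_q_need_thr_progress(q);

        if (erts_thr_progress_has_reached(wakeup))
            (void) erts_thr_q_clean(q);      /* can complete now */
        else
            schedule_clean_later(q, wakeup); /* hypothetical */
    }
}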
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index eccd49f2a9..aedceb6fc2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,11 +45,6 @@
* Data dependency read barrier. Orders *only* loads
* according to data dependency across the barrier.
*
- * If thread support has been disabled, these barriers will become no-ops.
- *
- * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will
- * be enabled only in the SMP enabled runtime system.
- *
* --- Atomic operations ---
*
 * Atomic operations exist for 32-bit, word size, and double word size
@@ -86,20 +81,6 @@
* barrier. Load in atomic operation is ordered
* before the barrier.
*
- * If thread support has been disabled, these functions are mapped to
- * functions that performs the same operation, but aren't atomic
- * and don't imply any memory barriers.
- *
- * If the atomic operations are prefixed with erts_smp_ instead of only
- * erts_ the atomic operations will only be atomic in the SMP enabled
- * runtime system, and will be mapped to non-atomic operations without
- * memory barriers in the runtime system without SMP support. Atomic
- * operations with erts_smp_ prefix should use the atomic types
- * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t
- * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The
- * integer data types erts_aint32_t, erts_aint_t, and erts_dw_atomic_t
- * are the same.
- *
* --- 32-bit atomic operations ---
*
* The following 32-bit atomic operations exist. <B> should be
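/*
 * A minimal sketch (not part of this diff) of the barrier suffixes
 * documented above: _relb orders the store after preceding writes,
 * _acqb orders the load before subsequent reads.  The flag variable
 * and both functions are hypothetical.
 */
static erts_atomic32_t ready;

static void
publish(void)
{
    /* ... write the data that the flag guards ... */
    erts_atomic32_set_relb(&ready, 1);            /* release barrier */
}

static int
is_ready(void)
{
    return erts_atomic32_read_acqb(&ready) != 0;  /* acquire barrier */
}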
@@ -259,13 +240,15 @@
#include "sys.h"
-#ifdef USE_THREADS
+#include "erl_lock_flags.h"
+#include "erl_term.h"
+
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
+
#include "erl_lock_check.h"
#include "erl_lock_count.h"
-#include "erl_term.h"
#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
/*
@@ -307,9 +290,11 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
-
} erts_mtx_t;
typedef ethr_cond erts_cnd_t;
@@ -320,7 +305,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwmtx_t;
@@ -365,7 +353,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_spinlock_t;
@@ -376,7 +367,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwlock_t;
@@ -391,76 +385,6 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-#else /* #ifdef USE_THREADS */
-
-#define ERTS_THR_MEMORY_BARRIER
-#define ERTS_THR_WRITE_MEMORY_BARRIER
-#define ERTS_THR_READ_MEMORY_BARRIER
-#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#define ERTS_THR_OPTS_DEFAULT_INITER 0
-typedef int erts_thr_opts_t;
-typedef int erts_thr_init_data_t;
-typedef int erts_thr_late_init_data_t;
-typedef int erts_tid_t;
-typedef int erts_mtx_t;
-typedef int erts_cnd_t;
-#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_RWMTX_TYPE_NORMAL 0
-#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_RWMTX_LONG_LIVED 0
-#define ERTS_RWMTX_SHORT_LIVED 0
-#define ERTS_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_rwmtx_opt_t;
-typedef int erts_rwmtx_t;
-typedef int erts_tsd_key_t;
-typedef int erts_tse_t;
-
-typedef struct { SWord sint[2]; } erts_dw_aint_t;
-typedef SWord erts_aint_t;
-typedef Sint32 erts_aint32_t;
-typedef Sint64 erts_aint64_t;
-
-#define erts_dw_atomic_t erts_dw_aint_t
-#define erts_atomic_t erts_aint_t
-#define erts_atomic32_t erts_aint32_t
-#define erts_atomic64_t erts_aint64_t
-
-#if __GNUC__ > 2
-typedef struct { } erts_spinlock_t;
-typedef struct { } erts_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_rwlock_t;
-#endif
-
-#ifdef WORDS_BIGENDIAN
-#define ERTS_DW_AINT_LOW_WORD 1
-#define ERTS_DW_AINT_HIGH_WORD 0
-#else
-#define ERTS_DW_AINT_LOW_WORD 0
-#define ERTS_DW_AINT_HIGH_WORD 1
-#endif
-
-#define ERTS_MTX_INITER 0
-#define ERTS_CND_INITER 0
-#define ERTS_THR_INIT_DATA_DEF_INITER 0
-
-#define ERTS_HAVE_REC_MTX_INIT 1
-
-#endif /* #ifdef USE_THREADS */
-
-#define erts_no_dw_atomic_t erts_dw_aint_t
-#define erts_no_atomic_t erts_aint_t
-#define erts_no_atomic32_t erts_aint32_t
-#define erts_no_atomic64_t erts_aint64_t
-
#define ERTS_AINT_NULL ((erts_aint_t) NULL)
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
@@ -479,16 +403,14 @@ ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra,
- Uint16 opt, int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
- char *name,
- Eterm extra,
- int enable_lcnt);
-ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
@@ -507,18 +429,15 @@ ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name);
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -535,89 +454,10 @@ ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-
-ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
-ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
-ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
- erts_no_dw_atomic_t *val,
- erts_no_dw_atomic_t *old_val);
-ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_inc_read(erts_no_atomic_t *incp);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp);
-ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp);
-ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bor(erts_no_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp,
- erts_aint_t new);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected);
-ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var,
- erts_aint_t mask,
- erts_aint_t set);
-ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var,
- erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp);
-ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp);
-ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp,
- erts_aint32_t i);
-ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp,
- erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected);
-ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
- erts_aint32_t mask,
- erts_aint32_t set);
-ERTS_GLB_INLINE void erts_no_atomic64_set(erts_no_atomic64_t *var,
- erts_aint64_t i);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read(erts_no_atomic64_t *var);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_inc_read(erts_no_atomic64_t *incp);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_dec_read(erts_no_atomic64_t *decp);
-ERTS_GLB_INLINE void erts_no_atomic64_inc(erts_no_atomic64_t *incp);
-ERTS_GLB_INLINE void erts_no_atomic64_dec(erts_no_atomic64_t *decp);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_add_read(erts_no_atomic64_t *addp,
- erts_aint64_t i);
-ERTS_GLB_INLINE void erts_no_atomic64_add(erts_no_atomic64_t *addp,
- erts_aint64_t i);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bor(erts_no_atomic64_t *var,
- erts_aint64_t mask);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_band(erts_no_atomic64_t *var,
- erts_aint64_t mask);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp,
- erts_aint64_t new);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
- erts_aint64_t new,
- erts_aint64_t expected);
-ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
- erts_aint64_t mask,
- erts_aint64_t set);
-
-ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
- char *name,
- Eterm extra,
- Uint16 opt);
-ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -626,11 +466,10 @@ ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigne
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -668,13 +507,10 @@ ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
sigset_t *oset);
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
-#ifdef USE_THREADS
ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig);
-#endif
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
-#ifdef USE_THREADS
ERTS_GLB_INLINE erts_aint_t
erts_atomic_read_bset_nob(erts_atomic_t *var,
@@ -1682,379 +1518,6 @@ erts_atomic64_read_dirty(erts_atomic64_t *var)
#endif /* ARCH_32 */
-#else /* !USE_THREADS */
-
-/* Double word size atomics */
-
-#define erts_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_dw_atomic_read_dirty erts_no_dw_atomic_read
-
-/* Word size atomics */
-
-#define erts_atomic_init_nob erts_no_atomic_set
-#define erts_atomic_set_nob erts_no_atomic_set
-#define erts_atomic_read_nob erts_no_atomic_read
-#define erts_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_atomic_inc_nob erts_no_atomic_inc
-#define erts_atomic_dec_nob erts_no_atomic_dec
-#define erts_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_atomic_add_nob erts_no_atomic_add
-#define erts_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_atomic_init_mb erts_no_atomic_set
-#define erts_atomic_set_mb erts_no_atomic_set
-#define erts_atomic_read_mb erts_no_atomic_read
-#define erts_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_atomic_inc_mb erts_no_atomic_inc
-#define erts_atomic_dec_mb erts_no_atomic_dec
-#define erts_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_atomic_add_mb erts_no_atomic_add
-#define erts_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_atomic_init_acqb erts_no_atomic_set
-#define erts_atomic_set_acqb erts_no_atomic_set
-#define erts_atomic_read_acqb erts_no_atomic_read
-#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_atomic_inc_acqb erts_no_atomic_inc
-#define erts_atomic_dec_acqb erts_no_atomic_dec
-#define erts_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_atomic_add_acqb erts_no_atomic_add
-#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_atomic_init_relb erts_no_atomic_set
-#define erts_atomic_set_relb erts_no_atomic_set
-#define erts_atomic_read_relb erts_no_atomic_read
-#define erts_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_atomic_inc_relb erts_no_atomic_inc
-#define erts_atomic_dec_relb erts_no_atomic_dec
-#define erts_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_atomic_add_relb erts_no_atomic_add
-#define erts_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_atomic_init_ddrb erts_no_atomic_set
-#define erts_atomic_set_ddrb erts_no_atomic_set
-#define erts_atomic_read_ddrb erts_no_atomic_read
-#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_atomic_add_ddrb erts_no_atomic_add
-#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_atomic_init_rb erts_no_atomic_set
-#define erts_atomic_set_rb erts_no_atomic_set
-#define erts_atomic_read_rb erts_no_atomic_read
-#define erts_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_atomic_inc_rb erts_no_atomic_inc
-#define erts_atomic_dec_rb erts_no_atomic_dec
-#define erts_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_atomic_add_rb erts_no_atomic_add
-#define erts_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_atomic_init_wb erts_no_atomic_set
-#define erts_atomic_set_wb erts_no_atomic_set
-#define erts_atomic_read_wb erts_no_atomic_read
-#define erts_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_atomic_inc_wb erts_no_atomic_inc
-#define erts_atomic_dec_wb erts_no_atomic_dec
-#define erts_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_atomic_add_wb erts_no_atomic_add
-#define erts_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_atomic_set_dirty erts_no_atomic_set
-#define erts_atomic_read_dirty erts_no_atomic_read
-
-/* 32-bit atomics */
-
-#define erts_atomic32_init_nob erts_no_atomic32_set
-#define erts_atomic32_set_nob erts_no_atomic32_set
-#define erts_atomic32_read_nob erts_no_atomic32_read
-#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_atomic32_add_nob erts_no_atomic32_add
-#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_mb erts_no_atomic32_set
-#define erts_atomic32_set_mb erts_no_atomic32_set
-#define erts_atomic32_read_mb erts_no_atomic32_read
-#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_atomic32_add_mb erts_no_atomic32_add
-#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_acqb erts_no_atomic32_set
-#define erts_atomic32_set_acqb erts_no_atomic32_set
-#define erts_atomic32_read_acqb erts_no_atomic32_read
-#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_atomic32_add_acqb erts_no_atomic32_add
-#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_relb erts_no_atomic32_set
-#define erts_atomic32_set_relb erts_no_atomic32_set
-#define erts_atomic32_read_relb erts_no_atomic32_read
-#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_atomic32_add_relb erts_no_atomic32_add
-#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_rb erts_no_atomic32_set
-#define erts_atomic32_set_rb erts_no_atomic32_set
-#define erts_atomic32_read_rb erts_no_atomic32_read
-#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_atomic32_add_rb erts_no_atomic32_add
-#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_wb erts_no_atomic32_set
-#define erts_atomic32_set_wb erts_no_atomic32_set
-#define erts_atomic32_read_wb erts_no_atomic32_read
-#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_atomic32_add_wb erts_no_atomic32_add
-#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_atomic32_set_dirty erts_no_atomic32_set
-#define erts_atomic32_read_dirty erts_no_atomic32_read
-
-/* 64-bit atomics */
-
-#define erts_atomic64_init_nob erts_no_atomic64_set
-#define erts_atomic64_set_nob erts_no_atomic64_set
-#define erts_atomic64_read_nob erts_no_atomic64_read
-#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read
-#define erts_atomic64_inc_nob erts_no_atomic64_inc
-#define erts_atomic64_dec_nob erts_no_atomic64_dec
-#define erts_atomic64_add_read_nob erts_no_atomic64_add_read
-#define erts_atomic64_add_nob erts_no_atomic64_add
-#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_nob erts_no_atomic64_read_band
-#define erts_atomic64_xchg_nob erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_mb erts_no_atomic64_set
-#define erts_atomic64_set_mb erts_no_atomic64_set
-#define erts_atomic64_read_mb erts_no_atomic64_read
-#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_mb erts_no_atomic64_inc
-#define erts_atomic64_dec_mb erts_no_atomic64_dec
-#define erts_atomic64_add_read_mb erts_no_atomic64_add_read
-#define erts_atomic64_add_mb erts_no_atomic64_add
-#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_mb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_mb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_acqb erts_no_atomic64_set
-#define erts_atomic64_set_acqb erts_no_atomic64_set
-#define erts_atomic64_read_acqb erts_no_atomic64_read
-#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_acqb erts_no_atomic64_inc
-#define erts_atomic64_dec_acqb erts_no_atomic64_dec
-#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read
-#define erts_atomic64_add_acqb erts_no_atomic64_add
-#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_relb erts_no_atomic64_set
-#define erts_atomic64_set_relb erts_no_atomic64_set
-#define erts_atomic64_read_relb erts_no_atomic64_read
-#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_relb erts_no_atomic64_inc
-#define erts_atomic64_dec_relb erts_no_atomic64_dec
-#define erts_atomic64_add_read_relb erts_no_atomic64_add_read
-#define erts_atomic64_add_relb erts_no_atomic64_add
-#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_relb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_relb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_ddrb erts_no_atomic64_set
-#define erts_atomic64_set_ddrb erts_no_atomic64_set
-#define erts_atomic64_read_ddrb erts_no_atomic64_read
-#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_ddrb erts_no_atomic64_inc
-#define erts_atomic64_dec_ddrb erts_no_atomic64_dec
-#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read
-#define erts_atomic64_add_ddrb erts_no_atomic64_add
-#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_rb erts_no_atomic64_set
-#define erts_atomic64_set_rb erts_no_atomic64_set
-#define erts_atomic64_read_rb erts_no_atomic64_read
-#define erts_atomic64_inc_read_rb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_rb erts_no_atomic64_inc
-#define erts_atomic64_dec_rb erts_no_atomic64_dec
-#define erts_atomic64_add_read_rb erts_no_atomic64_add_read
-#define erts_atomic64_add_rb erts_no_atomic64_add
-#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_rb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_rb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_wb erts_no_atomic64_set
-#define erts_atomic64_set_wb erts_no_atomic64_set
-#define erts_atomic64_read_wb erts_no_atomic64_read
-#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_wb erts_no_atomic64_inc
-#define erts_atomic64_dec_wb erts_no_atomic64_dec
-#define erts_atomic64_add_read_wb erts_no_atomic64_add_read
-#define erts_atomic64_add_wb erts_no_atomic64_add
-#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_wb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_wb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset
-
-#define erts_atomic64_set_dirty erts_no_atomic64_set
-#define erts_atomic64_read_dirty erts_no_atomic64_read
-
-#endif /* !USE_THREADS */
#include "erl_msacc.h"
@@ -2063,221 +1526,127 @@ erts_atomic64_read_dirty(erts_atomic64_t *var)
ERTS_GLB_INLINE void
erts_thr_init(erts_thr_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_init(id);
if (res)
erts_thr_fatal_error(res, "initialize thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_late_init(erts_thr_late_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_late_init(id);
if (res)
erts_thr_fatal_error(res, "complete initialization of thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
-#ifdef USE_THREADS
int res = ethr_thr_create(tid, func, arg, opts);
if (res)
erts_thr_fatal_error(res, "create thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_join(erts_tid_t tid, void **thr_res)
{
-#ifdef USE_THREADS
int res = ethr_thr_join(tid, thr_res);
if (res)
erts_thr_fatal_error(res, "join thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_detach(erts_tid_t tid)
{
-#ifdef USE_THREADS
int res = ethr_thr_detach(tid);
if (res)
erts_thr_fatal_error(res, "detach thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_exit(void *res)
{
-#ifdef USE_THREADS
ethr_thr_exit(res);
erts_thr_fatal_error(0, "terminate thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_install_exit_handler(void (*exit_handler)(void))
{
-#ifdef USE_THREADS
int res = ethr_install_exit_handler(exit_handler);
if (res != 0)
erts_thr_fatal_error(res, "install thread exit handler");
-#endif
}
ERTS_GLB_INLINE erts_tid_t
erts_thr_self(void)
{
-#ifdef USE_THREADS
return ethr_self();
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE int
erts_thr_getname(erts_tid_t tid, char *buf, size_t len)
{
-#ifdef USE_THREADS
return ethr_getname(tid, buf, len);
-#else
- return -1;
-#endif
}
ERTS_GLB_INLINE int
erts_equal_tids(erts_tid_t x, erts_tid_t y)
{
-#ifdef USE_THREADS
return ethr_equal_tids(x, y);
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
+erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "initialize mutex");
+ }
-ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt,
- int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX | opt, extra);
+ flags |= ERTS_LOCK_TYPE_MUTEX;
+#ifdef DEBUG
+ mtx->flags = flags;
#endif
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra, int enable_lcnt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
+ erts_lc_init_lock_x(&mtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- if (enable_lcnt)
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
- else
- erts_lcnt_init_lock_x(&mtx->lcnt, NULL, ERTS_LCNT_LT_MUTEX, extra);
-#endif
- ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_mtx_init(erts_mtx_t *mtx, char *name)
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
-#endif
-}
+ erts_mtx_init(mtx, name, extra, flags);
-ERTS_GLB_INLINE void
-erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
-#endif
+ #ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+ #endif
+ #ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+ #endif
}
ERTS_GLB_INLINE void
erts_mtx_destroy(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&mtx->lcnt);
+ erts_lcnt_uninstall(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
if (res != 0) {
@@ -2291,7 +1660,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
erts_thr_fatal_error(res, "destroy mutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -2301,7 +1669,6 @@ erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_trylock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2323,9 +1690,6 @@ erts_mtx_trylock(erts_mtx_t *mtx)
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
return res;
-#else
- return 0;
-#endif
}
@@ -2336,7 +1700,6 @@ erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&mtx->lc, file, line);
@@ -2351,13 +1714,11 @@ erts_mtx_lock(erts_mtx_t *mtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
@@ -2365,16 +1726,16 @@ erts_mtx_unlock(erts_mtx_t *mtx)
erts_lcnt_unlock(&mtx->lcnt);
#endif
ethr_mutex_unlock(&mtx->mtx);
-#endif
}
ERTS_GLB_INLINE int
erts_lc_mtx_is_locked(erts_mtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2385,17 +1746,14 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_init(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_init(cnd);
if (res)
erts_thr_fatal_error(res, "initialize condition variable");
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_destroy(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_destroy(cnd);
if (res != 0) {
#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
@@ -2408,13 +1766,11 @@ erts_cnd_destroy(erts_cnd_t *cnd)
#endif
erts_thr_fatal_error(res, "destroy condition variable");
}
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2436,7 +1792,6 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
if (res != 0 && res != EINTR)
erts_thr_fatal_error(res, "wait on condition variable");
ERTS_MSACC_POP_STATE();
-#endif
}
/*
@@ -2452,18 +1807,14 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_signal(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_broadcast(cnd);
-#endif
}
/* rwmutex */
@@ -2471,81 +1822,54 @@ erts_cnd_broadcast(erts_cnd_t *cnd)
ERTS_GLB_INLINE void
erts_rwmtx_set_reader_group(int no)
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX);
#endif
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
erts_thr_fatal_error(res, "set reader group");
-#endif
}
ERTS_GLB_INLINE void
-erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef USE_THREADS
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
+ char *name, Eterm extra, erts_lock_flags_t flags) {
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (name && name[0] == '\0')
- erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
- else
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
-#endif
-#endif
-}
+ if (res != 0) {
+ erts_thr_fatal_error(res, "initialize rwmutex");
+ }
-ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra)
-{
- erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
-}
+ flags |= ERTS_LOCK_TYPE_RWMUTEX;
+#ifdef DEBUG
+ rwmtx->flags = flags;
+#endif
-ERTS_GLB_INLINE void
-erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_init_lock_x(&rwmtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
-#endif
+ erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
-{
- erts_rwmtx_init_opt(rwmtx, NULL, name);
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
+ erts_lock_flags_t flags) {
+ erts_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
}
ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&rwmtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&rwmtx->lcnt);
+ erts_lcnt_uninstall(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
if (res != 0) {
@@ -2559,7 +1883,6 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
#endif
erts_thr_fatal_error(res, "destroy rwmutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -2569,11 +1892,10 @@ erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2582,19 +1904,16 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -2604,36 +1923,32 @@ erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
-#endif
}
@@ -2644,11 +1959,10 @@ erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2657,19 +1971,16 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -2679,36 +1990,32 @@ erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
-#endif
}
#if 0 /* The following rwmtx function names are
@@ -2740,10 +2047,11 @@ erts_rwmtx_wunlock(erts_rwmtx_t *rwmtx)
ERTS_GLB_INLINE int
erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2754,10 +2062,11 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
ERTS_GLB_INLINE int
erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2765,396 +2074,41 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#endif
}
-/* No atomic ops */
-
-ERTS_GLB_INLINE void
-erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
-{
- var->sint[0] = val->sint[0];
- var->sint[1] = val->sint[1];
-}
-
-ERTS_GLB_INLINE void
-erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
-{
- val->sint[0] = var->sint[0];
- val->sint[1] = var->sint[1];
-}
-
-ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
- erts_no_dw_atomic_t *new_val,
- erts_no_dw_atomic_t *old_val)
-{
- if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) {
- erts_no_dw_atomic_read(var, old_val);
- return 0;
- }
- else {
- erts_no_dw_atomic_set(var, new_val);
- return !0;
- }
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i)
-{
- *var = i;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read(erts_no_atomic_t *var)
-{
- return *var;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_inc_read(erts_no_atomic_t *incp)
-{
- return ++(*incp);
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_dec_read(erts_no_atomic_t *decp)
-{
- return --(*decp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_inc(erts_no_atomic_t *incp)
-{
- ++(*incp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_dec(erts_no_atomic_t *decp)
-{
- --(*decp);
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i)
-{
- return *addp += i;
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i)
-{
- *addp += i;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask)
-{
- erts_aint_t old;
- old = *var;
- *var |= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask)
-{
- erts_aint_t old;
- old = *var;
- *var &= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new)
-{
- erts_aint_t old = *xchgp;
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected)
-{
- erts_aint_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_no_atomic_read_bset(erts_no_atomic_t *var,
- erts_aint_t mask,
- erts_aint_t set)
-{
- erts_aint_t old = *var;
- *var &= ~mask;
- *var |= (mask & set);
- return old;
-}
-
-/* atomic32 */
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i)
-{
- *var = i;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read(erts_no_atomic32_t *var)
-{
- return *var;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_inc_read(erts_no_atomic32_t *incp)
-{
- return ++(*incp);
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_dec_read(erts_no_atomic32_t *decp)
-{
- return --(*decp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_inc(erts_no_atomic32_t *incp)
-{
- ++(*incp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_dec(erts_no_atomic32_t *decp)
-{
- --(*decp);
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i)
-{
- return *addp += i;
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i)
-{
- *addp += i;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask)
-{
- erts_aint32_t old;
- old = *var;
- *var |= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask)
-{
- erts_aint32_t old;
- old = *var;
- *var &= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new)
-{
- erts_aint32_t old = *xchgp;
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected)
-{
- erts_aint32_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
- erts_aint32_t mask,
- erts_aint32_t set)
-{
- erts_aint32_t old = *var;
- *var &= ~mask;
- *var |= (mask & set);
- return old;
-}
-
-/* atomic64 */
-
-ERTS_GLB_INLINE void
-erts_no_atomic64_set(erts_no_atomic64_t *var, erts_aint64_t i)
-{
- *var = i;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_read(erts_no_atomic64_t *var)
-{
- return *var;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_inc_read(erts_no_atomic64_t *incp)
-{
- return ++(*incp);
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_dec_read(erts_no_atomic64_t *decp)
-{
- return --(*decp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic64_inc(erts_no_atomic64_t *incp)
-{
- ++(*incp);
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic64_dec(erts_no_atomic64_t *decp)
-{
- --(*decp);
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_add_read(erts_no_atomic64_t *addp, erts_aint64_t i)
-{
- return *addp += i;
-}
-
-ERTS_GLB_INLINE void
-erts_no_atomic64_add(erts_no_atomic64_t *addp, erts_aint64_t i)
-{
- *addp += i;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_read_bor(erts_no_atomic64_t *var, erts_aint64_t mask)
-{
- erts_aint64_t old;
- old = *var;
- *var |= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_read_band(erts_no_atomic64_t *var, erts_aint64_t mask)
-{
- erts_aint64_t old;
- old = *var;
- *var &= mask;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, erts_aint64_t new)
-{
- erts_aint64_t old = *xchgp;
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp,
- erts_aint64_t new,
- erts_aint64_t expected)
-{
- erts_aint64_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-}
-
-ERTS_GLB_INLINE erts_aint64_t
-erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
- erts_aint64_t mask,
- erts_aint64_t set)
-{
- erts_aint64_t old = *var;
- *var &= ~mask;
- *var |= (mask & set);
- return old;
-}
-
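
The long block deleted above provided single-threaded stand-ins for the atomic API; with the non-threaded (USE_THREADS-less) build gone they have no callers left. The cmpxchg contract they mimicked (return the old value, store only on a match) is the same as C11's compare-exchange. A self-contained sketch, not ERTS code, of the usual retry loop built on that contract:

    /* C11 sketch of what erts_no_atomic_cmpxchg callers do: publish
     * old+1, reloading 'old' whenever another thread wins the race. */
    #include <stdatomic.h>

    static long
    inc_read_sketch(_Atomic long *ctr)
    {
        long old = atomic_load(ctr);
        while (!atomic_compare_exchange_weak(ctr, &old, old + 1))
            ;   /* 'old' was refreshed with the winning value; retry */
        return old + 1;
    }
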
/* spinlock */
ERTS_GLB_INLINE void
-erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "init spinlock");
+ }
-ERTS_GLB_INLINE void
-erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
- Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+ flags |= ERTS_LOCK_TYPE_SPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_spinlock_init(erts_spinlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
}
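
The rewritten initializer folds the old erts_spinlock_init_x/_x_opt variants into a single entry point that always takes a name, an Eterm tag and lock-flag properties; the type bit is OR:ed in here so call sites pass only properties and category. A call-site sketch with a hypothetical lock, mirroring the static-lock initializations later in this patch:

    static erts_spinlock_t my_lock;

    static void
    my_init_sketch(void)
    {
        erts_spinlock_init(&my_lock, "my_lock", NIL,
                           ERTS_LOCK_FLAGS_PROPERTY_STATIC
                           | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    }

The rwmutex and rwspinlock initializers in this file follow the same shape.
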
ERTS_GLB_INLINE void
erts_spinlock_destroy(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
if (res != 0) {
@@ -3168,15 +2122,11 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
@@ -3184,9 +2134,6 @@ erts_spin_unlock(erts_spinlock_t *lock)
erts_lcnt_unlock(&lock->lcnt);
#endif
ethr_spin_unlock(&lock->slck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3196,7 +2143,6 @@ erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
erts_spin_lock(erts_spinlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&lock->lc,file,line);
@@ -3211,18 +2157,16 @@ erts_spin_lock(erts_spinlock_t *lock)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_TYPE_SPINLOCK;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3233,51 +2177,38 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
/* rwspinlock */
ERTS_GLB_INLINE void
-erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
-#endif
-#else
- (void)lock;
+ if (res) {
+ erts_thr_fatal_error(res, "init rwlock");
+ }
+
+ flags |= ERTS_LOCK_TYPE_RWSPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-}
-ERTS_GLB_INLINE void
-erts_rwlock_init(erts_rwlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_rwlock_destroy(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
if (res != 0) {
@@ -3291,25 +2222,18 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3319,40 +2243,32 @@ erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_read_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3362,33 +2278,30 @@ erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_write_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3399,10 +2312,11 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
ERTS_GLB_INLINE int
erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
{
-#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3413,125 +2327,90 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
ERTS_GLB_INLINE void
erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_key_delete(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_delete(key);
if (res)
erts_thr_fatal_error(res, "delete thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_set(erts_tsd_key_t key, void *value)
{
-#ifdef USE_THREADS
int res = ethr_tsd_set(key, value);
if (res)
erts_thr_fatal_error(res, "set thread specific data");
-#endif
}
ERTS_GLB_INLINE void *
erts_tsd_get(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
return ethr_tsd_get(key);
-#else
- return NULL;
-#endif
}
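
With the non-threaded build gone, the TSD wrappers above are thin, always-on shims over ethr_tsd_*. A usage sketch with a hypothetical key (in real code the key is created once at start-up, then set/read per thread):

    static erts_tsd_key_t my_data_key;   /* hypothetical slot */

    static void
    tsd_sketch(void *my_data)
    {
        erts_tsd_key_create(&my_data_key, "my_data_key");
        erts_tsd_set(my_data_key, my_data);
        ASSERT(erts_tsd_get(my_data_key) == my_data);
    }
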
ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
-#ifdef USE_THREADS
return (erts_tse_t *) ethr_get_ts_event();
-#else
- return (erts_tse_t *) NULL;
-#endif
}
ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_leave_ts_event(ep);
-#endif
}
ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep)
{
-#ifdef USE_THREADS
int res = ethr_event_prepare_timed(&((ethr_ts_event *) ep)->event);
if (res != 0)
erts_thr_fatal_error(res, "prepare timed");
-#endif
}
ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_set(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_reset(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_wait(&((ethr_ts_event *) ep)->event);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_twait(&((ethr_ts_event *) ep)->event,
(ethr_sint64_t) tmo);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_stwait(&((ethr_ts_event *) ep)->event,
@@ -3539,49 +2418,34 @@ ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
(ethr_sint64_t) tmo);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
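
Note that each wait above is bracketed by ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP), so microstate accounting attributes the blocked time to sleeping. A sketch of the usual lost-wakeup-safe pairing (hypothetical predicate and plumbing; ep is the waiter's event, shared with the waker):

    /* Waiter: reset first, re-check the condition, then sleep. */
    erts_tse_reset(ep);
    if (!work_available())          /* hypothetical predicate */
        (void) erts_tse_wait(ep);

    /* Waker: publish the work, then set the event. */
    publish_work();                 /* hypothetical */
    erts_tse_set(ep);
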
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
-#ifdef USE_THREADS
return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
{
-#ifdef USE_THREADS
int res = ethr_set_main_thr_status(on, no);
if (res != 0)
erts_thr_fatal_error(res, "set thread main status");
-#endif
}
ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
-#ifdef USE_THREADS
int main_status;
int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
erts_thr_fatal_error(res, "get thread main status");
return main_status;
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void erts_thr_yield(void)
{
-#ifdef USE_THREADS
int res = ETHR_YIELD();
if (res != 0)
erts_thr_fatal_error(res, "yield");
-#endif
}
@@ -3589,34 +2453,28 @@ ERTS_GLB_INLINE void erts_thr_yield(void)
ERTS_GLB_INLINE void
erts_thr_kill(erts_tid_t tid, int sig) {
-#ifdef USE_THREADS
int res = ethr_kill((ethr_tid)tid, sig);
if (res)
erts_thr_fatal_error(res, "killing thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
-#ifdef USE_THREADS
int res = ethr_sigmask(how, set, oset);
if (res)
erts_thr_fatal_error(res, "get or set signal mask");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_sigwait(const sigset_t *set, int *sig)
{
-#ifdef USE_THREADS
int res;
do {
res = ethr_sigwait(set, sig);
} while (res == EINTR);
if (res)
erts_thr_fatal_error(res, "to wait for signal");
-#endif
}
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
@@ -3624,37 +2482,3 @@ erts_thr_sigwait(const sigset_t *set, int *sig)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* #ifndef ERL_THREAD_H__ */
-
-#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
-
-/* Deprecated functions to replace */
-
-#undef erts_atomic_init
-#undef erts_atomic_set
-#undef erts_atomic_read
-#undef erts_atomic_inctest
-#undef erts_atomic_dectest
-#undef erts_atomic_inc
-#undef erts_atomic_dec
-#undef erts_atomic_addtest
-#undef erts_atomic_add
-#undef erts_atomic_xchg
-#undef erts_atomic_cmpxchg
-#undef erts_atomic_bor
-#undef erts_atomic_band
-
-#undef erts_atomic32_init
-#undef erts_atomic32_set
-#undef erts_atomic32_read
-#undef erts_atomic32_inctest
-#undef erts_atomic32_dectest
-#undef erts_atomic32_inc
-#undef erts_atomic32_dec
-#undef erts_atomic32_addtest
-#undef erts_atomic32_add
-#undef erts_atomic32_xchg
-#undef erts_atomic32_cmpxchg
-#undef erts_atomic32_bor
-#undef erts_atomic32_band
-
-#endif
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index a1c4220633..968f21fd51 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,19 +21,54 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-/* timer wheel size NEED to be a power of 2 */
-#ifdef SMALL_MEMORY
-#define ERTS_TIW_SIZE (1 << 13)
-#else
-#define ERTS_TIW_SIZE (1 << 16)
+#include "erl_monitor_link.h"
+
+#if 0
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
#endif
-#if defined(DEBUG) || 0
+#if defined(ERTS_TW_DEBUG)
#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
#else
#define ERTS_TIME_ASSERT(B) ((void) 1)
#endif
+#ifdef ERTS_TW_DEBUG
+/*
+ * Soon wheel will handle about 1 second
+ * Later wheel will handle about 8 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 10
+# define ERTS_TW_LATER_WHEEL_BITS 10
+#else
+# ifdef SMALL_MEMORY
+/*
+ * Soon wheel will handle about 4 seconds
+ * Later wheel will handle about 2 hours and 19 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 12
+# define ERTS_TW_LATER_WHEEL_BITS 12
+# else
+/*
+ * Soon wheel will handle about 16 seconds
+ * Later wheel will handle about 37 hours and 16 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 14
+# define ERTS_TW_LATER_WHEEL_BITS 14
+# endif
+#endif
+
+/*
+ * Number of slots in each timer wheel...
+ *
+ * These *need* to be a power of 2
+ */
+#define ERTS_TW_SOON_WHEEL_SIZE (1 << ERTS_TW_SOON_WHEEL_BITS)
+#define ERTS_TW_LATER_WHEEL_SIZE (1 << ERTS_TW_LATER_WHEEL_BITS)
+
typedef enum {
ERTS_NO_TIME_WARP_MODE,
ERTS_SINGLE_TIME_WARP_MODE,
@@ -46,8 +81,8 @@ typedef ErtsMonotonicTime * ErtsNextTimeoutRef;
extern SysTimeval erts_first_emu_time;
-void erts_monitor_time_offset(Eterm id, Eterm ref);
-int erts_demonitor_time_offset(Eterm ref);
+void erts_monitor_time_offset(ErtsMonitor *mon);
+void erts_demonitor_time_offset(ErtsMonitor *mon);
int erts_init_time_sup(int, ErtsTimeWarpMode);
void erts_late_init_time_sup(void);
@@ -74,9 +109,6 @@ void erts_p_slpq(void);
void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
#endif
-typedef UWord erts_approx_time_t;
-erts_approx_time_t erts_get_approx_time(void);
-
int erts_has_time_correction(void);
int erts_check_time_adj_support(int time_correction,
ErtsTimeWarpMode time_warp_mode);
@@ -97,13 +129,23 @@ Eterm erts_get_monotonic_end_time(struct process *c_p);
Eterm erts_monotonic_time_source(struct process*c_p);
Eterm erts_system_time_source(struct process*c_p);
+void erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user,
+ ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff,
+ ErtsMonotonicTime *ms_sys_diff);
+void erts_wall_clock_elapsed_both(ErtsMonotonicTime *total,
+ ErtsMonotonicTime *diff);
+
#ifdef SYS_CLOCK_RESOLUTION
#define ERTS_CLKTCK_RESOLUTION ((ErtsMonotonicTime) (SYS_CLOCK_RESOLUTION*1000))
#else
#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
#endif
-#define ERTS_TIMER_WHEEL_MSEC (ERTS_TIW_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_SOON_WHEEL_MSEC (ERTS_TW_SOON_WHEEL_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_LATER_WHEEL_MSEC (ERTS_TW_LATER_WHEEL_SIZE*ERTS_TW_SOON_WHEEL_MSEC/2)
+
+#define ERTS_TIMER_WHEEL_MSEC ERTS_TW_LATER_WHEEL_MSEC
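
A quick sanity check connecting these macros to the wheel comments earlier in the header, assuming the default 1000 Hz clock tick (one tick per millisecond) and the 14-bit non-SMALL_MEMORY wheels:

    soon wheel:  2^14 slots * 1 ms         = 16384 ms   (about 16 s)
    later wheel: 2^14 slots * (16384/2) ms = 2^27 ms    (about 37 h 16 min)

Each later-wheel slot spans half a soon-wheel revolution, which is where the ERTS_TW_SOON_WHEEL_MSEC/2 factor above comes from.
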
struct erts_time_sup_read_only__ {
ErtsMonotonicTime monotonic_time_unit;
@@ -412,34 +454,25 @@ erts_time_unit_conversion(Uint64 value,
void erts_sched_init_time_sup(ErtsSchedulerData *esdp);
-#define ERTS_TWHEEL_SLOT_AT_ONCE -1
-#define ERTS_TWHEEL_SLOT_INACTIVE -2
+#define ERTS_TW_SLOT_INACTIVE (-2)
/*
** Timer entry:
*/
typedef struct erl_timer {
- struct erl_timer* next; /* next entry tiw slot or chain */
- struct erl_timer* prev; /* prev entry tiw slot or chain */
- union {
- struct {
- void (*timeout)(void*); /* called when timeout */
- void (*cancel)(void*); /* called when cancel (may be NULL) */
- void* arg; /* argument to timeout/cancel procs */
- } func;
- ErtsThrPrgrLaterOp cleanup;
- } u;
ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
+ struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
+ void (*timeout)(void*); /* called when timeout */
+ void* arg; /* argument to timeout/cancel procs */
int slot;
} ErtsTWheelTimer;
typedef void (*ErlTimeoutProc)(void*);
-typedef void (*ErlCancelProc)(void*);
void erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos);
+ void *arg, ErtsMonotonicTime timeout_pos);
void erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p);
ErtsTimerWheel *erts_create_timer_wheel(ErtsSchedulerData *esdp);
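
erts_twheel_set_timer loses its ErlCancelProc argument (and the cleanup union is gone from ErtsTWheelTimer), so every call site drops one NULL, as the erl_time_sup.c hunks below show. A sketch of the new shape with hypothetical names:

    static ErtsTWheelTimer my_timer;
    static void my_timeout(void *arg) { /* handle expiry */ }

    static void
    arm_timer_sketch(ErtsSchedulerData *esdp, void *my_arg,
                     ErtsMonotonicTime timeout_pos)
    {
        erts_twheel_init_timer(&my_timer);
        erts_twheel_set_timer(esdp->timer_wheel, &my_timer,
                              my_timeout, my_arg, timeout_pos);
    }
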
@@ -447,12 +480,13 @@ ErtsMonotonicTime erts_check_next_timeout_time(ErtsSchedulerData *);
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p);
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_tweel_read_timeout(ErtsTWheelTimer *twt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p)
{
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
@@ -460,6 +494,12 @@ ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_
return *((ErtsMonotonicTime *) nxt_tmo_ref);
}
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_tweel_read_timeout(ErtsTWheelTimer *twt)
+{
+ return twt->timeout_pos;
+}
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
void
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 6aa2a7500f..29c698e34f 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,13 +35,31 @@
#include "erl_time.h"
#include "erl_driver.h"
#include "erl_nif.h"
+#include "erl_proc_sig_queue.h"
-static erts_smp_mtx_t erts_timeofday_mtx;
-static erts_smp_mtx_t erts_get_time_mtx;
+static erts_mtx_t erts_get_time_mtx;
-static SysTimes t_start; /* Used in elapsed_time_both */
-static ErtsMonotonicTime prev_wall_clock_elapsed; /* Used in wall_clock_elapsed_time_both */
-static ErtsMonotonicTime previous_now; /* Used in get_now */
+ /* used by erts_runtime_elapsed_both */
+typedef struct {
+ erts_mtx_t mtx;
+ ErtsMonotonicTime user;
+ ErtsMonotonicTime sys;
+} ErtsRunTimePrevData;
+
+static union {
+ ErtsRunTimePrevData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunTimePrevData))];
+} runtime_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))];
+} wall_clock_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static union {
+ erts_atomic64_t time;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))];
+} now_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE);
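
Each of these globals is padded to a whole number of cache lines and cache-line aligned, so the mutex-protected runtime data and the two frequently updated atomics never false-share a line. The idiom in miniature (plain C; 64 is an illustrative line size):

    /* Round the object up to whole cache lines so that writes to one
     * global cannot invalidate a neighbour's line. */
    #define LINE 64
    typedef union {
        long long value;
        char align__[((sizeof(long long) + LINE - 1) / LINE) * LINE];
    } padded_i64;
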
static ErtsMonitor *time_offset_monitors = NULL;
static Uint no_time_offset_monitors = 0;
@@ -140,7 +158,7 @@ typedef struct {
struct time_sup_infrequently_changed__ {
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
struct {
- erts_smp_rwmtx_t rwmtx;
+ erts_rwmtx_t rwmtx;
ErtsTWheelTimer timer;
ErtsMonotonicCorrectionData cdata;
} parmon;
@@ -148,9 +166,9 @@ struct time_sup_infrequently_changed__ {
#endif
ErtsSystemTime sinit;
ErtsMonotonicTime not_corrected_moffset;
- erts_smp_atomic64_t offset;
+ erts_atomic64_t offset;
ErtsMonotonicTime shadow_offset;
- erts_smp_atomic32_t preliminary_offset;
+ erts_atomic32_t preliminary_offset;
};
struct time_sup_frequently_changed__ {
@@ -174,33 +192,22 @@ static struct {
ErtsTimeSupData erts_time_sup__ erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-/*
- * erts_get_approx_time() returns an *approximate* time
- * in seconds. NOTE that this time may jump backwards!!!
- */
-erts_approx_time_t
-erts_get_approx_time(void)
-{
- ErtsSystemTime stime = erts_os_system_time();
- return (erts_approx_time_t) ERTS_MONOTONIC_TO_SEC(stime);
-}
-
static ERTS_INLINE void
init_time_offset(ErtsMonotonicTime offset)
{
- erts_smp_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
+ erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
static ERTS_INLINE void
set_time_offset(ErtsMonotonicTime offset)
{
- erts_smp_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
+ erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
static ERTS_INLINE ErtsMonotonicTime
get_time_offset(void)
{
- return (ErtsMonotonicTime) erts_smp_atomic64_read_acqb(&time_sup.inf.c.offset);
+ return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset);
}
static ERTS_INLINE void
@@ -281,7 +288,7 @@ read_corrected_time(int os_drift_corrected)
ErtsMonotonicTime os_mtime;
ErtsMonotonicCorrectionInstance ci;
- erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
os_mtime = erts_os_monotonic_time();
@@ -294,7 +301,7 @@ read_corrected_time(int os_drift_corrected)
ci = time_sup.inf.c.parmon.cdata.insts.prev;
}
- erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
return calc_corrected_erl_mtime(os_mtime, &ci, NULL,
os_drift_corrected);
@@ -372,13 +379,13 @@ check_time_correction(void *vesdp)
int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
int set_new_correction = 0, begin_short_intervals = 0;
- erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
erts_os_times(&os_mtime, &os_stime);
ci = time_sup.inf.c.parmon.cdata.insts.curr;
- erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
if (os_mtime < ci.os_mtime)
erts_exit(ERTS_ABORT_EXIT,
@@ -393,7 +400,7 @@ check_time_correction(void *vesdp)
if (time_sup.inf.c.shadow_offset) {
ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE);
- if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
sdiff += time_sup.inf.c.shadow_offset;
else
time_sup.inf.c.shadow_offset = 0;
@@ -416,7 +423,7 @@ check_time_correction(void *vesdp)
}
}
else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE
- && erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ && erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
&& (sdiff < -2*time_sup.r.o.adj.small_diff
|| 2*time_sup.r.o.adj.small_diff < sdiff)) {
/*
@@ -641,7 +648,7 @@ check_time_correction(void *vesdp)
#endif
if (set_new_correction) {
- erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx);
os_mtime = erts_os_monotonic_time();
@@ -669,7 +676,7 @@ check_time_correction(void *vesdp)
time_sup.inf.c.parmon.cdata.insts.curr.os_mtime = os_mtime;
time_sup.inf.c.parmon.cdata.insts.curr.correction = new_correction;
- erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx);
}
if (!esdp)
@@ -678,7 +685,6 @@ check_time_correction(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_correction,
- NULL,
(void *) esdp,
timeout_pos);
}
@@ -729,7 +735,6 @@ check_time_offset(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_offset,
- NULL,
vesdp,
timeout_pos);
}
@@ -789,13 +794,13 @@ finalize_corrected_time_offset(ErtsSystemTime *stimep)
ErtsMonotonicCorrectionInstance ci;
int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
- erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
erts_os_times(&os_mtime, stimep);
ci = time_sup.inf.c.parmon.cdata.insts.curr;
- erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
+ erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx);
if (os_mtime < ci.os_mtime)
erts_exit(ERTS_ABORT_EXIT,
@@ -836,7 +841,6 @@ late_init_time_correction(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_func,
- NULL,
(quick_init_drift_adj
? NULL
: esdp),
@@ -849,7 +853,7 @@ static ErtsMonotonicTime get_not_corrected_time(void)
{
ErtsMonotonicTime stime, mtime;
- erts_smp_mtx_lock(&erts_get_time_mtx);
+ erts_mtx_lock(&erts_get_time_mtx);
stime = erts_os_system_time();
@@ -875,7 +879,7 @@ static ErtsMonotonicTime get_not_corrected_time(void)
ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset);
- erts_smp_mtx_unlock(&erts_get_time_mtx);
+ erts_mtx_unlock(&erts_get_time_mtx);
return mtime;
}
@@ -957,16 +961,20 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
- erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
- erts_smp_mtx_init(&erts_get_time_mtx, "get_time");
+ erts_mtx_init(&erts_get_time_mtx, "get_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_mtx_init(&runtime_prev.data.mtx, "runtime", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ runtime_prev.data.user = 0;
+ runtime_prev.data.sys = 0;
time_sup.r.o.correction = time_correction;
time_sup.r.o.warp_mode = time_warp_mode;
if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE)
- erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1);
+ erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1);
else
- erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0);
+ erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0);
time_sup.inf.c.shadow_offset = 0;
#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
@@ -1110,7 +1118,7 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
if (time_sup.r.o.correction) {
ErtsMonotonicCorrectionData *cdatap;
- erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
ErtsMonotonicTime offset;
erts_os_times(&time_sup.inf.c.minit,
&time_sup.inf.c.sinit);
@@ -1120,11 +1128,12 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
offset -= ERTS_MONOTONIC_BEGIN;
init_time_offset(offset);
- rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx,
- &rwmtx_opts, "get_corrected_time");
+ erts_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts,
+ "get_corrected_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
cdatap = &time_sup.inf.c.parmon.cdata;
@@ -1157,9 +1166,13 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
time_sup.f.c.last_not_corrected_time = 0;
}
- prev_wall_clock_elapsed = 0;
+ erts_atomic64_init_nob(&wall_clock_prev.time,
+ (erts_aint64_t) 0);
+
+ erts_atomic64_init_nob(
+ &now_prev.time,
+ (erts_aint64_t) ERTS_MONOTONIC_TO_USEC(get_time_offset()));
- previous_now = ERTS_MONOTONIC_TO_USEC(get_time_offset());
#ifdef DEBUG
time_sup_initialized = 1;
@@ -1200,7 +1213,7 @@ ErtsTimeOffsetState erts_time_offset_state(void)
case ERTS_NO_TIME_WARP_MODE:
return ERTS_TIME_OFFSET_FINAL;
case ERTS_SINGLE_TIME_WARP_MODE:
- if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
return ERTS_TIME_OFFSET_PRELIMINARY;
return ERTS_TIME_OFFSET_FINAL;
case ERTS_MULTI_TIME_WARP_MODE:
@@ -1233,9 +1246,9 @@ erts_finalize_time_offset(void)
case ERTS_SINGLE_TIME_WARP_MODE: {
ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL;
- erts_smp_mtx_lock(&erts_get_time_mtx);
+ erts_mtx_lock(&erts_get_time_mtx);
- if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) {
+ if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) {
ErtsMonotonicTime mtime, new_offset;
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
@@ -1272,11 +1285,11 @@ erts_finalize_time_offset(void)
set_time_offset(new_offset);
schedule_send_time_offset_changed_notifications(new_offset);
- erts_smp_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0);
+ erts_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0);
res = ERTS_TIME_OFFSET_PRELIMINARY;
}
- erts_smp_mtx_unlock(&erts_get_time_mtx);
+ erts_mtx_unlock(&erts_get_time_mtx);
return res;
}
@@ -1289,56 +1302,93 @@ erts_finalize_time_offset(void)
/* info functions */
void
-elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff)
+erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff)
{
- UWord prev_total_user, prev_total_sys;
- UWord total_user, total_sys;
+ ErtsMonotonicTime prev_user, prev_sys, user, sys;
+
+#ifdef HAVE_GETRUSAGE
+
+ struct rusage now;
+
+ if (getrusage(RUSAGE_SELF, &now) != 0) {
+ erts_exit(ERTS_ABORT_EXIT, "getrusage(RUSAGE_SELF, _) failed: %d\n", errno);
+ return;
+ }
+
+ user = (ErtsMonotonicTime) now.ru_utime.tv_sec;
+ user *= (ErtsMonotonicTime) 1000000;
+ user += (ErtsMonotonicTime) now.ru_utime.tv_usec;
+ user /= (ErtsMonotonicTime) 1000;
+
+ sys = (ErtsMonotonicTime) now.ru_stime.tv_sec;
+ sys *= (ErtsMonotonicTime) 1000000;
+ sys += (ErtsMonotonicTime) now.ru_stime.tv_usec;
+ sys /= (ErtsMonotonicTime) 1000;
+
+#else
+
SysTimes now;
sys_times(&now);
- total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
- total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+ user = (ErtsMonotonicTime) now.tms_utime;
+ user *= (ErtsMonotonicTime) 1000;
+ user /= (ErtsMonotonicTime) SYS_CLK_TCK;
- if (ms_user != NULL)
- *ms_user = total_user;
- if (ms_sys != NULL)
- *ms_sys = total_sys;
+ sys = (ErtsMonotonicTime) now.tms_stime;
+ sys *= (ErtsMonotonicTime) 1000;
+ sys /= (ErtsMonotonicTime) SYS_CLK_TCK;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
- prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
- t_start = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+#endif
+
+ if (ms_user)
+ *ms_user = user;
+ if (ms_sys)
+ *ms_sys = sys;
+
+ if (ms_user_diff || ms_sys_diff) {
+
+ erts_mtx_lock(&runtime_prev.data.mtx);
+
+ prev_user = runtime_prev.data.user;
+ prev_sys = runtime_prev.data.sys;
+ runtime_prev.data.user = user;
+ runtime_prev.data.sys = sys;
+
+ erts_mtx_unlock(&runtime_prev.data.mtx);
- if (ms_user_diff != NULL)
- *ms_user_diff = total_user - prev_total_user;
-
- if (ms_sys_diff != NULL)
- *ms_sys_diff = total_sys - prev_total_sys;
+ if (ms_user_diff)
+ *ms_user_diff = user - prev_user;
+ if (ms_sys_diff)
+ *ms_sys_diff = sys - prev_sys;
+ }
}
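
Note that every out-parameter of erts_runtime_elapsed_both may be NULL: the totals are always computed, but the prev-state mutex is taken only when a diff is actually requested. Caller sketch:

    /* statistics(runtime)-style caller; skip the diffs (and the
     * mutex) by passing NULL. */
    ErtsMonotonicTime user_ms, sys_ms;
    erts_runtime_elapsed_both(&user_ms, &sys_ms, NULL, NULL);
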
/* wall clock routines */
void
-wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
+erts_wall_clock_elapsed_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_diff)
{
ErtsMonotonicTime now, elapsed;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
now = time_sup.r.o.get_time();
update_last_mtime(NULL, now);
elapsed = ERTS_MONOTONIC_TO_MSEC(now);
- *ms_total = (UWord) elapsed;
- *ms_diff = (UWord) (elapsed - prev_wall_clock_elapsed);
- prev_wall_clock_elapsed = elapsed;
+ elapsed -= ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN);
+
+ *ms_total = elapsed;
+
+ if (ms_diff) {
+ ErtsMonotonicTime prev;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ prev = ((ErtsMonotonicTime)
+ erts_atomic64_xchg_mb(&wall_clock_prev.time,
+ (erts_aint64_t) elapsed));
+
+ *ms_diff = elapsed - prev;
+ }
}
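
The old timeofday mutex is replaced by a single atomic exchange: swapping in the new total returns the previous one in the same step, so total and diff stay mutually consistent without a lock. The same pattern in standard C11, as a sketch:

    #include <stdatomic.h>

    /* One atomic swap yields both the new total and the diff. */
    static long long
    elapsed_diff_sketch(_Atomic long long *prev, long long now_ms)
    {
        return now_ms - atomic_exchange(prev, now_ms);
    }
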
/* get current time */
@@ -1502,7 +1552,7 @@ static time_t gregday(int year, int month, int day)
pyear = gyear - 1;
ndays = (pyear/4) - (pyear/100) + (pyear/400) + pyear*365 + 366;
}
- /* number of days in all months preceeding month */
+ /* number of days in all months preceding month */
for (m = 1; m < month; m++)
ndays += mdays[m];
/* Extra day if leap year and March or later */
@@ -1716,22 +1766,27 @@ univ_to_local(Sint *year, Sint *month, Sint *day,
void
get_now(Uint* megasec, Uint* sec, Uint* microsec)
{
- ErtsMonotonicTime now_megasec, now_sec, now, mtime, time_offset;
+ ErtsMonotonicTime now_megasec, now_sec, now, prev, mtime, time_offset;
mtime = time_sup.r.o.get_time();
time_offset = get_time_offset();
update_last_mtime(NULL, mtime);
now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset);
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
/* Make sure now time is later than last time */
- if (now <= previous_now)
- now = previous_now + 1;
-
- previous_now = now;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ prev = erts_atomic64_read_nob(&now_prev.time);
+ while (1) {
+ ErtsMonotonicTime act;
+ if (now <= prev)
+ now = prev + 1;
+ act = ((ErtsMonotonicTime)
+ erts_atomic64_cmpxchg_mb(&now_prev.time,
+ (erts_aint64_t) now,
+ (erts_aint64_t) prev));
+ if (act == prev)
+ break;
+ prev = act;
+ }
now_megasec = now / ERTS_MONOTONIC_TIME_TERA;
now_sec = now / ERTS_MONOTONIC_TIME_MEGA;
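
The cmpxchg loop above is the standard lock-free way to hand out strictly increasing now/0 stamps: bump the candidate past the last published value, publish with cmpxchg, and on a race retry with the value that won. An equivalent self-contained C11 sketch:

    #include <stdatomic.h>

    /* Returns a stamp strictly greater than any previously returned. */
    static long long
    unique_now_sketch(_Atomic long long *prev_p, long long candidate)
    {
        long long prev = atomic_load(prev_p);
        for (;;) {
            if (candidate <= prev)
                candidate = prev + 1;       /* force strict increase */
            if (atomic_compare_exchange_strong(prev_p, &prev, candidate))
                return candidate;           /* published */
            /* 'prev' was refreshed with the winning value; retry */
        }
    }
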
@@ -1805,7 +1860,7 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
SysCpuTime t;
SysTimespec tp;
- sys_get_proc_cputime(t, tp);
+ sys_get_cputime(t, tp);
*microsec = (Uint)(tp.tv_nsec / 1000);
t = (tp.tv_sec / 1000000);
*megasec = (Uint)(t % 1000000);
@@ -1816,39 +1871,39 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
#include "big.h"
void
-erts_monitor_time_offset(Eterm id, Eterm ref)
+erts_monitor_time_offset(ErtsMonitor *mon)
{
- erts_smp_mtx_lock(&erts_get_time_mtx);
- erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL);
+ erts_mtx_lock(&erts_get_time_mtx);
+ erts_monitor_list_insert(&time_offset_monitors, mon);
no_time_offset_monitors++;
- erts_smp_mtx_unlock(&erts_get_time_mtx);
+ erts_mtx_unlock(&erts_get_time_mtx);
}
-int
-erts_demonitor_time_offset(Eterm ref)
-{
- int res;
- ErtsMonitor *mon;
- ASSERT(is_internal_ref(ref));
- erts_smp_mtx_lock(&erts_get_time_mtx);
- mon = erts_remove_monitor(&time_offset_monitors, ref);
- if (!mon)
- res = 0;
- else {
- ASSERT(no_time_offset_monitors > 0);
- no_time_offset_monitors--;
- res = 1;
- }
- erts_smp_mtx_unlock(&erts_get_time_mtx);
- if (res)
- erts_destroy_monitor(mon);
- return res;
+void
+erts_demonitor_time_offset(ErtsMonitor *mon)
+{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(mon->type == ERTS_MON_TYPE_TIME_OFFSET);
+
+ erts_mtx_lock(&erts_get_time_mtx);
+
+ ASSERT(erts_monitor_is_in_table(&mdp->target));
+
+ erts_monitor_list_delete(&time_offset_monitors, &mdp->target);
+
+ ASSERT(no_time_offset_monitors > 0);
+ no_time_offset_monitors--;
+
+ erts_mtx_unlock(&erts_get_time_mtx);
+
+ erts_monitor_release_both(mdp);
}
typedef struct {
Eterm pid;
Eterm ref;
- Eterm heap[REF_THING_SIZE];
+ Eterm heap[ERTS_REF_THING_SIZE];
} ErtsTimeOffsetMonitorInfo;
typedef struct {
@@ -1860,20 +1915,22 @@ static void
save_time_offset_monitor(ErtsMonitor *mon, void *vcntxt)
{
ErtsTimeOffsetMonitorContext *cntxt;
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Eterm *from_hp, *to_hp;
Uint mix;
int hix;
cntxt = (ErtsTimeOffsetMonitorContext *) vcntxt;
mix = (cntxt->ix)++;
- cntxt->to_mon_info[mix].pid = mon->pid;
+ ASSERT(is_internal_pid(mon->other.item));
+ cntxt->to_mon_info[mix].pid = mon->other.item;
to_hp = &cntxt->to_mon_info[mix].heap[0];
- ASSERT(is_internal_ref(mon->ref));
- from_hp = internal_ref_val(mon->ref);
- ASSERT(thing_arityval(*from_hp) + 1 == REF_THING_SIZE);
+ ASSERT(is_internal_ordinary_ref(mdp->ref));
+ from_hp = internal_ref_val(mdp->ref);
+ ASSERT(thing_arityval(*from_hp) + 1 == ERTS_REF_THING_SIZE);
- for (hix = 0; hix < REF_THING_SIZE; hix++)
+ for (hix = 0; hix < ERTS_REF_THING_SIZE; hix++)
to_hp[hix] = from_hp[hix];
cntxt->to_mon_info[mix].ref
@@ -1897,7 +1954,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
#endif
new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE;
- erts_smp_mtx_lock(&erts_get_time_mtx);
+ erts_mtx_lock(&erts_get_time_mtx);
no_monitors = no_time_offset_monitors;
if (no_monitors) {
@@ -1915,14 +1972,14 @@ send_time_offset_changed_notifications(void *new_offsetp)
cntxt.ix = 0;
cntxt.to_mon_info = to_mon_info;
- erts_doforall_monitors(time_offset_monitors,
- save_time_offset_monitor,
- &cntxt);
+ erts_monitor_list_foreach(time_offset_monitors,
+ save_time_offset_monitor,
+ &cntxt);
ASSERT(cntxt.ix == no_monitors);
}
- erts_smp_mtx_unlock(&erts_get_time_mtx);
+ erts_mtx_unlock(&erts_get_time_mtx);
if (no_monitors) {
Eterm *hp, *patch_refp, new_offset_term, message_template;
@@ -1933,7 +1990,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
hp = (Eterm *) (tmp + no_monitors*sizeof(ErtsTimeOffsetMonitorInfo));
hsz = 6; /* 5-tuple */
- hsz += REF_THING_SIZE;
+ hsz += ERTS_REF_THING_SIZE;
hsz += ERTS_SINT64_HEAP_SIZE(new_offset);
if (IS_SSMALL(new_offset))
@@ -1951,26 +2008,14 @@ send_time_offset_changed_notifications(void *new_offsetp)
ASSERT(*patch_refp == THE_NON_VALUE);
for (mix = 0; mix < no_monitors; mix++) {
- Process *rp = erts_proc_lookup(to_mon_info[mix].pid);
- if (rp) {
- Eterm ref = to_mon_info[mix].ref;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) {
- ErtsMessage *mp;
- ErlOffHeap *ohp;
- Eterm message;
-
- mp = erts_alloc_message_heap(rp, &rp_locks,
- hsz, &hp, &ohp);
- *patch_refp = ref;
- ASSERT(hsz == size_object(message_template));
- message = copy_struct(message_template, hsz, &hp, ohp);
- erts_queue_message(rp, rp_locks, mp, message, am_clock_service);
- }
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
+ *patch_refp = to_mon_info[mix].ref;
+ erts_proc_sig_send_persistent_monitor_msg(ERTS_MON_TYPE_TIME_OFFSET,
+ *patch_refp,
+ am_clock_service,
+ to_mon_info[mix].pid,
+ message_template,
+ hsz);
+ }
erts_free(ERTS_ALC_T_TMP, tmp);
}
@@ -2159,6 +2204,8 @@ time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonoto
ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
break;
#endif
+ case am_perf_counter:
+ goto trap_to_erlang_code;
default: {
Eterm value, native_res;
#ifndef ARCH_64
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8c84303997..2350d4c02f 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -77,8 +77,8 @@ static Eterm system_profile;
int erts_cpu_timestamp;
#endif
-static erts_smp_mtx_t smq_mtx;
-static erts_smp_rwmtx_t sys_trace_rwmtx;
+static erts_mtx_t smq_mtx;
+static erts_rwmtx_t sys_trace_rwmtx;
enum ErtsSysMsgType {
SYS_MSG_TYPE_UNDEFINED,
@@ -237,7 +237,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
}
}
-#ifdef ERTS_SMP
static ERTS_INLINE Uint
patch_ts_size(int ts_type)
@@ -257,7 +256,6 @@ patch_ts_size(int ts_type)
return 0;
}
}
-#endif /* ERTS_SMP */
/*
* Write a timestamp. The timestamp MUST be the last
@@ -298,18 +296,11 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
if (shrink) {
if (bp)
bp->used_size -= shrink;
-#ifndef ERTS_SMP
- else if (tracer) {
- Eterm *endp = ts_hp + shrink;
- HRelease(tracer, endp, ts_hp);
- }
-#endif
}
return res;
}
-#ifdef ERTS_SMP
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
Eterm to,
@@ -321,7 +312,6 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type,
Eterm msg,
ErlHeapFragment *bp);
static void init_sys_msg_dispatcher(void);
-#endif
static void init_tracer_nif(void);
static int tracer_cmp_fun(void*, void*);
@@ -332,11 +322,12 @@ static void tracer_free_fun(void*);
typedef struct ErtsTracerNif_ ErtsTracerNif;
void erts_init_trace(void) {
- erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+ erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
@@ -349,9 +340,7 @@ void erts_init_trace(void) {
default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
default_port_tracer = erts_tracer_nil;
system_seq_tracer = erts_tracer_nil;
-#ifdef ERTS_SMP
init_sys_msg_dispatcher();
-#endif
init_tracer_nif();
}
@@ -411,43 +400,35 @@ static Uint active_sched;
void
erts_system_profile_setup_active_schedulers(void)
{
- ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
active_sched = erts_active_schedulers();
}
static void
exiting_reset(Eterm exiting)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
if (exiting == system_monitor) {
-#ifdef ERTS_SMP
system_monitor = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_monitor_clear(NULL);
-#endif
}
if (exiting == system_profile) {
-#ifdef ERTS_SMP
system_profile = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_profile_clear(NULL);
-#endif
}
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
erts_trace_check_exiting(Eterm exiting)
{
int reset = 0;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
if (exiting == system_monitor)
reset = 1;
else if (exiting == system_profile)
reset = 1;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
if (reset)
exiting_reset(exiting);
}
@@ -467,7 +448,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new
}
}
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
old = system_seq_tracer;
system_seq_tracer = erts_tracer_nil;
erts_tracer_update(&system_seq_tracer, new);
@@ -475,7 +456,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
#endif
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
return old;
}
@@ -483,12 +464,12 @@ ErtsTracer
erts_get_system_seq_tracer(void)
{
ErtsTracer st;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
if (st != erts_tracer_nil &&
call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED,
@@ -521,8 +502,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
ErtsTracer curr_default_tracer = *default_tracer;
if (tracerp) {
/* we only have a rlock, so we have to unlock and then rwlock */
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
}
/* check if someone else changed default tracer
while we got the write lock, if so we don't do
@@ -532,8 +513,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
ERTS_TRACER_CLEAR(default_tracer);
}
if (tracerp) {
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
}
}
}
@@ -566,98 +547,84 @@ void
erts_change_default_proc_tracing(int setflags, Uint flagsp,
const ErtsTracer tracer)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
erts_change_default_tracing(
setflags, flagsp, tracer,
&default_proc_trace_flags,
&default_proc_tracer);
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
erts_change_default_port_tracing(int setflags, Uint flagsp,
const ErtsTracer tracer)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
erts_change_default_tracing(
setflags, flagsp, tracer,
&default_port_trace_flags,
&default_port_tracer);
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
*tracerp = erts_tracer_nil; /* initialize */
get_default_tracing(
flagsp, tracerp,
&default_proc_trace_flags,
&default_proc_tracer);
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
}
void
erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
*tracerp = erts_tracer_nil; /* initialize */
get_default_tracing(
flagsp, tracerp,
&default_port_trace_flags,
&default_port_tracer);
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
}
void
erts_set_system_monitor(Eterm monitor)
{
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
system_monitor = monitor;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_monitor(void)
{
Eterm monitor;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
monitor = system_monitor;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
return monitor;
}
/* Performance monitoring */
void erts_set_system_profile(Eterm profile) {
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwlock(&sys_trace_rwmtx);
system_profile = profile;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_profile(void) {
Eterm profile;
- erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ erts_rwmtx_rlock(&sys_trace_rwmtx);
profile = system_profile;
- erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_rwmtx_runlock(&sys_trace_rwmtx);
return profile;
}
-
-#ifdef HAVE_ERTS_NOW_CPU
-# define GET_NOW(m, s, u) \
-do { \
- if (erts_cpu_timestamp) \
- erts_get_now_cpu(m, s, u); \
- else \
- get_now(m, s, u); \
-} while (0)
-#else
-# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
-#endif
-
-
static void
write_sys_msg_to_port(Eterm unused_to,
Port* trace_port,
@@ -678,71 +645,11 @@ write_sys_msg_to_port(Eterm unused_to,
erts_exit(ERTS_ERROR_EXIT, "Internal error in do_send_to_port: %d\n", ptr-buffer);
}
-#ifndef ERTS_SMP
- if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id))
-#endif
erts_raw_port_command(trace_port, buffer, ptr-buffer);
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
-#ifndef ERTS_SMP
-/* Profile send
- * Checks if profiler is port or process
- * Eterm msg is local, need copying.
- */
-
-static void
-profile_send(Eterm from, Eterm message) {
- Uint sz = 0;
- Uint *hp = NULL;
- Eterm msg = NIL;
- Process *profile_p = NULL;
-
- Eterm profiler = erts_get_system_profile();
-
- /* do not profile profiler pid */
- if (from == profiler) return;
-
- if (is_internal_port(profiler)) {
- Port *profiler_port = NULL;
-
- /* not smp */
-
- profiler_port = erts_id2port_sflgs(profiler,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (profiler_port) {
- write_sys_msg_to_port(profiler,
- profiler_port,
- NIL, /* or current process->common.id */
- SYS_MSG_TYPE_SYSPROF,
- message);
- erts_port_release(profiler_port);
- }
-
- } else {
- ErtsMessage *mp;
- ASSERT(is_internal_pid(profiler));
-
- profile_p = erts_proc_lookup(profiler);
-
- if (!profile_p)
- return;
-
- sz = size_object(message);
- mp = erts_alloc_message(sz, &hp);
- if (sz == 0)
- msg = message;
- else
- msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap);
-
- erts_queue_message(profile_p, 0, mp, msg, from);
- }
-}
-
-#endif
static void
trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
@@ -781,7 +688,8 @@ trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
tmp = make_small(0);
} else {
hp = HAlloc(p, 4);
- tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
+ tmp = TUPLE3(hp,p->current->module,p->current->function,
+ make_small(p->current->arity));
hp += 4;
}
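
This is the first of many hunks replacing raw BeamInstr mfa[0..2] indexing with named ErtsCodeMFA fields. The layout assumed throughout the patch is, in sketch form (the authoritative definition lives in the code-loader headers):

    typedef struct {
        Eterm module;    /* atom,          was mfa[0] */
        Eterm function;  /* atom,          was mfa[1] */
        Uint  arity;     /* small integer, was mfa[2] */
    } ErtsCodeMFA;
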
@@ -813,9 +721,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
ErtsTracerNif *tnif = NULL;
ErtsTracingEvent* te;
Eterm pam_result;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl;
-#endif
ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));
@@ -840,9 +746,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
} else
pam_result = am_true;
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
@@ -860,9 +764,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation, msg, to, pam_result);
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
erts_match_set_release_result_trace(p, pam_result);
}
@@ -1027,14 +929,14 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
{
Eterm mfa;
- BeamInstr *code_ptr = find_function_from_pc(pc);
+ ErtsCodeMFA *cmfa = find_function_from_pc(pc);
-
- if (!code_ptr) {
+ if (!cmfa) {
mfa = am_undefined;
} else {
Eterm *hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
+ mfa = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
}
send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
@@ -1046,11 +948,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
+erts_trace_return(Process* p, ErtsCodeMFA *mfa,
+ Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa, mod, name;
- int arity;
+ Eterm mfa_tuple;
Uint meta_flags, *tracee_flags;
ASSERT(tracer);
@@ -1084,15 +986,13 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
tracee_flags = &meta_flags;
}
- mod = fi[0];
- name = fi[1];
- arity = fi[2];
-
hp = HAlloc(p, 4);
- mfa = TUPLE3(hp, mod, name, make_small(arity));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function,
+ make_small(mfa->arity));
hp += 4;
send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
- NULL, TRACE_FUN_T_CALL, am_return_from, mfa, retval, am_true);
+ NULL, TRACE_FUN_T_CALL, am_return_from, mfa_tuple,
+ retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1103,7 +1003,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
* Where Class is atomic but Value is any term.
*/
void
-erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
+erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer)
{
Eterm* hp;
@@ -1142,7 +1042,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
}
hp = HAlloc(p, 7);
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
+ mfa_tuple = TUPLE3(hp, mfa->module, mfa->function, make_small(mfa->arity));
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
@@ -1165,7 +1065,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
* if it is a pid or port we do a meta trace.
*/
Uint32
-erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
+erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
@@ -1179,7 +1079,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm transformed_args[MAX_ARG];
ErtsTracer pre_ms_tracer = erts_tracer_nil;
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN);
ASSERT(tracer);
if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
@@ -1244,7 +1144,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* such as size_object() and copy_struct(), we must make sure that we
* temporarily convert any match contexts to sub binaries.
*/
- arity = (Eterm) mfa[2];
+ arity = info->mfa.arity;
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
@@ -1339,7 +1239,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
hp += 2;
}
}
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
+ mfa_tuple = TUPLE3(hp, info->mfa.module, info->mfa.function, mfa_tuple);
hp += 4;
/*
@@ -1436,6 +1336,7 @@ void
trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
ErtsTracerNif *tnif = NULL;
+ Eterm* o_hp = NULL;
Eterm* hp;
Uint sz = 0;
Eterm tup;
@@ -1446,7 +1347,7 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
if (is_non_value(msg)) {
(void) erts_process_gc_info(p, &sz, NULL, 0, 0);
- hp = HAlloc(p, sz + 3 + 2);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));
msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
@@ -1455,29 +1356,22 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
what, msg, THE_NON_VALUE, am_true);
+ if (o_hp)
+ erts_free(ERTS_ALC_T_TMP, o_hp);
}
}
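
trace_gc() now builds the GC-info message in temporary C memory rather than on the tracee's heap via HAlloc, presumably because the tracer copies the message anyway and allocating on the process heap from inside GC tracing is undesirable. Reduced to its essentials, the pattern is:

    Uint sz = 0;
    Eterm *o_hp, *hp, msg;

    (void) erts_process_gc_info(p, &sz, NULL, 0, 0);       /* 1: measure   */
    o_hp = hp = erts_alloc(ERTS_ALC_T_TMP,
                           (sz + 3 + 2) * sizeof(Eterm));  /* 2: temp heap */
    msg = erts_process_gc_info(p, NULL, &hp, 0, 0);        /* 3: build     */
    /* ... hand msg to the tracer ... */
    erts_free(ERTS_ALC_T_TMP, o_hp);                       /* 4: release   */
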
void
-monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint time)
+monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
+ ErtsCodeMFA *out_fp, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined;
Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} ->
* 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout +
@@ -1490,11 +1384,13 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
tmo = erts_bld_uint(&hp, NULL, time);
if (in_fp != NULL) {
- in_mfa = TUPLE3(hp,(Eterm) in_fp[0], (Eterm) in_fp[1], make_small(in_fp[2]));
+ in_mfa = TUPLE3(hp, in_fp->module, in_fp->function,
+ make_small(in_fp->arity));
hp += 4;
}
if (out_fp != NULL) {
- out_mfa = TUPLE3(hp,(Eterm) out_fp[0], (Eterm) out_fp[1], make_small(out_fp[2]));
+ out_mfa = TUPLE3(hp, out_fp->module, out_fp->function,
+ make_small(out_fp->arity));
hp += 4;
}
tmo_tpl = TUPLE2(hp,am_timeout, tmo);
@@ -1511,36 +1407,18 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
hp += 2;
msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, op;
Eterm op_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} ->
* 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2)
@@ -1557,7 +1435,6 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break;
case ERTS_PORT_TASK_INPUT: op = am_input; break;
case ERTS_PORT_TASK_OUTPUT: op = am_output; break;
- case ERTS_PORT_TASK_EVENT: op = am_event; break;
case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break;
default: op = am_undefined; break;
}
@@ -1576,24 +1453,13 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
hp += 2;
msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_long_gc(Process *p, Uint time) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -1618,12 +1484,6 @@ monitor_long_gc(Process *p, Uint time) {
Eterm *hp_end;
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -1651,24 +1511,13 @@ monitor_long_gc(Process *p, Uint time) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_large_heap(Process *p) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -1692,13 +1541,6 @@ monitor_large_heap(Process *p) {
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p) {
- return;
- }
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -1726,47 +1568,22 @@ monitor_large_heap(Process *p) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_generic(Process *p, Eterm type, Eterm spec) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Eterm *hp, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
@@ -1779,21 +1596,14 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
Eterm *hp, msg;
ErlHeapFragment *bp = NULL;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (7 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
-#else
Uint hsz;
hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
switch (state) {
case am_active:
@@ -1815,14 +1625,8 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
/* Write timestamp in element 6 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(NIL, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
@@ -1831,7 +1635,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
void
trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
ErtsTracerNif *tnif = NULL;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open))
send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS,
am_open, calling_pid, drv_name, am_true);
@@ -1848,9 +1652,9 @@ void
trace_port(Port *t_p, Eterm what, Eterm data) {
ErtsTracerNif *tnif = NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS,
what, data, THE_NON_VALUE, am_true);
@@ -1870,7 +1674,6 @@ trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
} else {
ProcBin* pb = (ProcBin *)*hp;
Binary *bptr = erts_bin_nrml_alloc(sz);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, bin, sz);
pb->thing_word = HEADER_PROC_BIN;
pb->size = sz;
@@ -1894,9 +1697,9 @@ void
trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
{
ErtsTracerNif *tnif = NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) {
/* We can use a stack-allocated heap here, as the NIF is called in the
context of a port */
@@ -1995,8 +1798,8 @@ trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
TRACE_FUN_T_RECEIVE,
am_receive, data, THE_NON_VALUE, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
if (orig_hp)
erts_free(ERTS_ALC_T_TMP, orig_hp);
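
Here and below, the open-coded dectest-then-free sequence is folded into erts_bin_release(), assumed to be equivalent to the code it replaces:

    /* Sketch of the assumed semantics (field names as in the old code): */
    static void bin_release_sketch(Binary *bp)
    {
        if (erts_refc_dectest(&bp->refc, 0) == 0)
            erts_bin_free(bp);                /* last reference is gone */
    }
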
@@ -2011,9 +1814,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
{
ErtsTracerNif *tnif = NULL;
Eterm op = exists ? am_send : am_send_to_non_existing_process;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
op, msg, receiver, am_true);
@@ -2022,9 +1825,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
{
ErtsTracerNif *tnif = NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) {
Eterm msg;
Binary* bptr = NULL;
@@ -2046,8 +1849,8 @@ void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
am_send, msg, to, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -2070,9 +1873,9 @@ trace_sched_ports(Port *p, Eterm what) {
void
trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
ErtsTracerNif *tnif = NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
tnif, TRACE_FUN_T_SCHED_PORT,
@@ -2087,24 +1890,14 @@ profile_runnable_port(Port *p, Eterm status) {
ErlHeapFragment *bp = NULL;
Eterm count = make_small(0);
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
-
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
-#else
Uint hsz;
hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
msg = TUPLE5(hp, am_profile, p->common.id, status, count,
NIL /* Will be overwritten by timestamp */);
@@ -2113,14 +1906,8 @@ profile_runnable_port(Port *p, Eterm status) {
/* Write timestamp in element 5 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
/* Process profiling */
@@ -2129,54 +1916,42 @@ profile_runnable_proc(Process *p, Eterm status){
Eterm *hp, msg;
Eterm where = am_undefined;
ErlHeapFragment *bp = NULL;
- BeamInstr *current = NULL;
-
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ ErtsCodeMFA *cmfa = NULL;
- hp = local_heap;
-#else
ErtsThrPrgrDelayHandle dhndl;
Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1;
-#endif
/* Assumptions:
* We may not hold the MAIN_LOCK for the process p here.
* We assume that p->current and p->i can be read atomically
*/
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */
-#endif
if (!ERTS_PROC_IS_EXITING(p)) {
if (p->current) {
- current = p->current;
+ cmfa = p->current;
} else {
- current = find_function_from_pc(p->i);
+ cmfa = find_function_from_pc(p->i);
}
}
-#ifdef ERTS_SMP
- if (!current) {
+ if (!cmfa) {
hsz -= 4;
}
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
- if (current) {
- where = TUPLE3(hp, current[0], current[1], make_small(current[2])); hp += 4;
+ if (cmfa) {
+ where = TUPLE3(hp, cmfa->module, cmfa->function,
+ make_small(cmfa->arity));
+ hp += 4;
} else {
where = make_small(0);
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
msg = TUPLE5(hp, am_profile, p->common.id, status, where,
NIL /* Will be overwritten by timestamp */);
@@ -2185,20 +1960,13 @@ profile_runnable_proc(Process *p, Eterm status){
/* Write timestamp in element 5 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
/* End system_profile tracing */
-#ifdef ERTS_SMP
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
@@ -2244,7 +2012,7 @@ enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
sys_message_queue = smqp;
}
sys_message_queue_end = smqp;
- erts_smp_cnd_signal(&smq_cnd);
+ erts_cnd_signal(&smq_cnd);
}
static void
@@ -2254,15 +2022,15 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
Eterm msg,
ErlHeapFragment *bp)
{
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
enqueue_sys_msg_unlocked(type, from, to, msg, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
- enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_logger, msg, bp);
}
void
@@ -2308,10 +2076,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_monitor_flags.busy_port
&& !erts_system_monitor_flags.busy_dist_port)
break; /* Everything is disabled */
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
if (system_monitor == receiver || receiver == NIL)
erts_system_monitor_clear(NULL);
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_SYSPROF:
if (receiver == NIL
@@ -2321,20 +2089,20 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_profile_flags.scheduler)
break;
/* Block system to clear flags */
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
if (system_profile == receiver || receiver == NIL) {
erts_system_profile_clear(NULL);
}
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
- char *no_elgger = "(no error logger present)";
+ char *no_elgger = "(no logger present)";
Eterm *tp;
Eterm tag;
if (is_not_tuple(smqp->msg)) {
unexpected_elmsg:
erts_fprintf(stderr,
- "%s unexpected error logger message: %T\n",
+ "%s unexpected logger message: %T\n",
no_elgger,
smqp->msg);
}
@@ -2370,38 +2138,38 @@ static void
sys_msg_dispatcher_wakeup(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 0;
- erts_smp_cnd_signal(&smq_cnd);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_cnd_signal(&smq_cnd);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_prep_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 1;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_fin_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
*wait_p = 0;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
sys_msg_dispatcher_wait(void *vwait_p)
{
int *wait_p = (int *) vwait_p;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
while (*wait_p)
- erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void *
@@ -2409,6 +2177,7 @@ sys_msg_dispatcher_func(void *unused)
{
ErtsThrPrgrCallbacks callbacks;
ErtsSysMsgQ *local_sys_message_queue = NULL;
+ ErtsThrPrgrData *tpd;
int wait = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2421,15 +2190,15 @@ sys_msg_dispatcher_func(void *unused)
callbacks.wait = sys_msg_dispatcher_wait;
callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
while (1) {
int end_wait = 0;
ErtsSysMsgQ *smqp;
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
/* Free previously used queue ... */
while (local_sys_message_queue) {
@@ -2440,25 +2209,25 @@ sys_msg_dispatcher_func(void *unused)
/* Fetch current trace message queue ... */
if (!sys_message_queue) {
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
end_wait = 1;
- erts_thr_progress_active(NULL, 0);
- erts_thr_progress_prepare_wait(NULL);
- erts_smp_mtx_lock(&smq_mtx);
+ erts_thr_progress_active(tpd, 0);
+ erts_thr_progress_prepare_wait(tpd);
+ erts_mtx_lock(&smq_mtx);
}
while (!sys_message_queue)
- erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_cnd_wait(&smq_cnd, &smq_mtx);
local_sys_message_queue = sys_message_queue;
sys_message_queue = NULL;
sys_message_queue_end = NULL;
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
if (end_wait) {
- erts_thr_progress_finalize_wait(NULL);
- erts_thr_progress_active(NULL, 1);
+ erts_thr_progress_finalize_wait(tpd);
+ erts_thr_progress_active(tpd, 1);
}
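
The dispatcher used to pass NULL and let the thread-progress subsystem look up the calling thread's data on every call; it now threads the tpd handle returned at registration through all calls. The sleep protocol visible in this hunk is, in outline:

    erts_thr_progress_active(tpd, 0);      /* stop counting as active     */
    erts_thr_progress_prepare_wait(tpd);   /* let thread progress pass us */
    erts_mtx_lock(&smq_mtx);
    while (!sys_message_queue)
        erts_cnd_wait(&smq_cnd, &smq_mtx); /* block until work arrives    */
    erts_mtx_unlock(&smq_mtx);
    erts_thr_progress_finalize_wait(tpd);  /* re-join thread progress     */
    erts_thr_progress_active(tpd, 1);      /* count as active again       */
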
/* Send trace messages ... */
@@ -2471,8 +2240,8 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
- if (erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
@@ -2502,7 +2271,7 @@ sys_msg_dispatcher_func(void *unused)
}
break;
case SYS_MSG_TYPE_ERRLGR:
- receiver = am_error_logger;
+ receiver = am_logger;
break;
default:
receiver = NIL;
@@ -2528,10 +2297,10 @@ sys_msg_dispatcher_func(void *unused)
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
- erts_smp_proc_unlock(proc, proc_locks);
+ erts_proc_unlock(proc, proc_locks);
}
}
- else if (receiver == am_error_logger) {
+ else if (receiver == am_logger) {
proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
if (!proc)
goto failure;
@@ -2566,7 +2335,7 @@ sys_msg_dispatcher_func(void *unused)
sys_msg_disp_failure(smqp, receiver);
drop_sys_msg:
if (proc)
- erts_smp_proc_unlock(proc, proc_locks);
+ erts_proc_unlock(proc, proc_locks);
if (smqp->bp)
free_message_buffer(smqp->bp);
#ifdef DEBUG_PRINTOUTS
@@ -2586,7 +2355,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
ErlHeapFragment *))
{
ErtsSysMsgQ *sm;
- erts_smp_mtx_lock(&smq_mtx);
+ erts_mtx_lock(&smq_mtx);
for (sm = sys_message_queue; sm; sm = sm->next) {
Eterm to;
switch (sm->type) {
@@ -2597,7 +2366,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
to = erts_get_system_profile();
break;
case SYS_MSG_TYPE_ERRLGR:
- to = am_error_logger;
+ to = am_logger;
break;
default:
to = NIL;
@@ -2605,28 +2374,28 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
}
(*func)(sm->from, to, sm->msg, sm->bp);
}
- erts_smp_mtx_unlock(&smq_mtx);
+ erts_mtx_unlock(&smq_mtx);
}
static void
init_sys_msg_dispatcher(void)
{
- erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
thr_opts.detached = 1;
thr_opts.name = "sys_msg_dispatcher";
init_smq_element_alloc();
sys_message_queue = NULL;
sys_message_queue_end = NULL;
- erts_smp_cnd_init(&smq_cnd);
- erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
- erts_smp_thr_create(&sys_msg_dispatcher_tid,
+ erts_cnd_init(&smq_cnd);
+ erts_mtx_init(&smq_mtx, "sys_msg_q", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
&thr_opts);
}
-#endif
#include "erl_nif.h"
@@ -2722,7 +2491,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) {
}
static Hash *tracer_hash = NULL;
-static erts_smp_rwmtx_t tracer_mtx;
+static erts_rwmtx_t tracer_mtx;
static ErtsTracerNif *
load_tracer_nif(const ErtsTracer tracer)
@@ -2751,7 +2520,7 @@ load_tracer_nif(const ErtsTracer tracer)
for(i = 0; i < num_of_funcs; i++) {
for (j = 0; j < NIF_TRACER_TYPES; j++) {
- if (strcmp(tracers[j].name, funcs[i].name) == 0 && tracers[j].arity == funcs[i].arity) {
+ if (sys_strcmp(tracers[j].name, funcs[i].name) == 0 && tracers[j].arity == funcs[i].arity) {
tracers[j].cb = &(funcs[i]);
break;
}
@@ -2762,9 +2531,9 @@ load_tracer_nif(const ErtsTracer tracer)
return NULL;
}
- erts_smp_rwmtx_rwlock(&tracer_mtx);
+ erts_rwmtx_rwlock(&tracer_mtx);
tnif = hash_put(tracer_hash, &tnif_tmpl);
- erts_smp_rwmtx_rwunlock(&tracer_mtx);
+ erts_rwmtx_rwunlock(&tracer_mtx);
return tnif;
}
@@ -2775,14 +2544,14 @@ lookup_tracer_nif(const ErtsTracer tracer)
ErtsTracerNif tnif_tmpl;
ErtsTracerNif *tnif;
tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
- erts_smp_rwmtx_rlock(&tracer_mtx);
+ erts_rwmtx_rlock(&tracer_mtx);
if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) {
- erts_smp_rwmtx_runlock(&tracer_mtx);
+ erts_rwmtx_runlock(&tracer_mtx);
tnif = load_tracer_nif(tracer);
ASSERT(!tnif || tnif->nif_mod);
return tnif;
}
- erts_smp_rwmtx_runlock(&tracer_mtx);
+ erts_rwmtx_runlock(&tracer_mtx);
ASSERT(tnif->nif_mod);
return tnif;
}
@@ -2811,7 +2580,7 @@ erts_term_to_tracer(Eterm prefix, Eterm t)
state = tp[3];
}
} else {
- if (arityval(tp[0]) == 2 && is_atom(tp[2])) {
+ if (arityval(tp[0]) == 2 && is_atom(tp[1])) {
module = tp[1];
state = tp[2];
}
@@ -2847,6 +2616,38 @@ erts_tracer_to_term(Process *p, ErtsTracer tracer)
}
}
+Eterm
+erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer)
+{
+ Eterm res;
+ Eterm state;
+ Uint sz;
+
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+
+ state = ERTS_TRACER_STATE(tracer);
+ sz = is_immed(state) ? 0 : size_object(state);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ res = is_immed(state) ? state : copy_struct(state, sz, hpp, ohp);
+ else
+ res = THE_NON_VALUE;
+
+ if (ERTS_TRACER_MODULE(tracer) != am_erl_tracer) {
+ if (szp)
+ *szp += 3;
+ if (hpp) {
+ res = TUPLE2(*hpp, ERTS_TRACER_MODULE(tracer), res);
+ *hpp += 3;
+ }
+ }
+
+ return res;
+}
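
erts_build_tracer_to_term() follows the usual ERTS two-pass build convention: one call with only szp to learn the heap need, then a second call with hpp to build the term. A hypothetical caller (a sketch, not part of the patch):

    Uint sz = 0;
    Eterm *hp, res;

    (void) erts_build_tracer_to_term(NULL, NULL, &sz, tracer);   /* measure  */
    hp = HAlloc(p, sz);                                          /* allocate */
    res = erts_build_tracer_to_term(&hp, &MSO(p), NULL, tracer); /* build    */
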
static ERTS_INLINE int
send_to_tracer_nif_raw(Process *c_p, Process *tracee,
@@ -2920,17 +2721,17 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt,
Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
if (c_p) {
/* We have to hold the main lock of the currently executing process */
erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN);
}
if (is_internal_pid(t_p->id)) {
/* We have to have at least one lock */
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
} else {
ASSERT(is_internal_port(t_p->id));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
}
#endif
@@ -2973,17 +2774,17 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
enum ErtsTracerOpt topt, Eterm tag) {
Eterm nif_result;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
if (c_p)
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
|| erts_thr_progress_is_blocking());
if (is_internal_pid(t_p->id)) {
/* We have to have at least one lock */
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
|| erts_thr_progress_is_blocking());
} else {
ASSERT(is_internal_port(t_p->id));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
|| erts_thr_progress_is_blocking());
}
#endif
@@ -3001,24 +2802,28 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
ASSERT(0);
}
- /* Only remove tracer on self() and ports */
+ /* Only remove tracer when the tracee is self() or a port, AND we are on a normal scheduler */
if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsProcLocks c_p_xlocks = 0;
- if (is_internal_pid(t_p->id)) {
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
- if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
- c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
- if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
- erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (is_internal_pid(t_p->id)) {
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
+ c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
+ erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
}
}
- }
- erts_tracer_replace(t_p, erts_tracer_nil);
- t_p->trace_flags &= ~TRACEE_FLAGS;
- if (c_p_xlocks)
- erts_smp_proc_unlock(c_p, c_p_xlocks);
+ erts_tracer_replace(t_p, erts_tracer_nil);
+ t_p->trace_flags &= ~TRACEE_FLAGS;
+
+ if (c_p_xlocks)
+ erts_proc_unlock(c_p, c_p_xlocks);
+ }
}
return 0;
@@ -3058,7 +2863,7 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) {
erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL);
} else if (is_internal_port(t_p->id)) {
@@ -3176,10 +2981,12 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
static void init_tracer_nif()
{
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
+
+ erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_tracer_nif_clear();
@@ -3188,7 +2995,7 @@ static void init_tracer_nif()
int erts_tracer_nif_clear()
{
- erts_smp_rwmtx_rlock(&tracer_mtx);
+ erts_rwmtx_rlock(&tracer_mtx);
if (!tracer_hash || tracer_hash->nobjs) {
HashFunctions hf;
@@ -3200,19 +3007,19 @@ int erts_tracer_nif_clear()
hf.meta_free = (HMFREE_FUN) erts_free;
hf.meta_print = (HMPRINT_FUN) erts_print;
- erts_smp_rwmtx_runlock(&tracer_mtx);
- erts_smp_rwmtx_rwlock(&tracer_mtx);
+ erts_rwmtx_runlock(&tracer_mtx);
+ erts_rwmtx_rwlock(&tracer_mtx);
if (tracer_hash)
hash_delete(tracer_hash);
tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf);
- erts_smp_rwmtx_rwunlock(&tracer_mtx);
+ erts_rwmtx_rwunlock(&tracer_mtx);
return 1;
}
- erts_smp_rwmtx_runlock(&tracer_mtx);
+ erts_rwmtx_runlock(&tracer_mtx);
return 0;
}
@@ -3223,7 +3030,7 @@ static int tracer_cmp_fun(void* a, void* b)
static HashValue tracer_hash_fun(void* obj)
{
- return make_internal_hash(((ErtsTracerNif*)obj)->module);
+ return make_internal_hash(((ErtsTracerNif*)obj)->module, 0);
}
static void *tracer_alloc_fun(void* tmpl)
@@ -3231,7 +3038,7 @@ static void *tracer_alloc_fun(void* tmpl)
ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF,
sizeof(ErtsTracerNif) +
sizeof(ErtsThrPrgrLaterOp));
- memcpy(obj, tmpl, sizeof(*obj));
+ sys_memcpy(obj, tmpl, sizeof(*obj));
return obj;
}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 0095d4386b..bccf31606e 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -87,7 +87,6 @@ void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
int erts_is_tracer_valid(Process* p);
-#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
void erts_block_sys_msg_dispatcher(void);
void erts_release_sys_msg_dispatcher(void);
@@ -97,15 +96,14 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
ErlHeapFragment *));
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
-#endif
void trace_send(Process*, Eterm, Eterm);
void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
+Uint32 erts_call_trace(Process *p, ErtsCodeInfo *info, struct binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
+void erts_trace_return(Process* p, ErtsCodeMFA *mfa, Eterm retval,
ErtsTracer *tracer);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
+void erts_trace_exception(Process* p, ErtsCodeMFA *mfa, Eterm class, Eterm value,
ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
void trace_sched(Process*, ErtsProcLocks, Eterm);
@@ -134,7 +132,8 @@ void erts_system_profile_setup_active_schedulers(void);
/* system_monitor */
void monitor_long_gc(Process *p, Uint time);
-void monitor_long_schedule_proc(Process *p, BeamInstr *in_i, BeamInstr *out_i, Uint time);
+void monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_i,
+ ErtsCodeMFA *out_i, Uint time);
void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time);
void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
@@ -142,17 +141,18 @@ Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+Eterm
+erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
+ Export* ep, BeamInstr *cp, Uint32 flags,
+ Uint32 flags_meta, BeamInstr* I,
+ ErtsTracer meta_tracer);
-#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
+#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \
do { \
if ((ESDP)->pending_trace_msgs) \
erts_send_pending_trace_msgs((ESDP)); \
} while (0)
-#else
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
-#endif
#define seq_trace_output(token, msg, type, receiver, process) \
seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
@@ -176,7 +176,7 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
@@ -198,6 +198,8 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
ErtsPTabElementCommon *t_p);
int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+Eterm erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer);
+
ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
void erts_tracer_replace(ErtsPTabElementCommon *t_p,
const ErtsTracer new_tracer);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index bd5e1482fb..d225916ac5 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -79,23 +79,23 @@ void erts_init_unicode(void)
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
/* Non visual BIFs to trap to. */
erts_init_trap_export(&characters_to_utf8_trap_exp,
- am_erlang, am_atom_put("characters_to_utf8_trap",23), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_utf8_trap"), 3,
&characters_to_utf8_trap);
erts_init_trap_export(&characters_to_list_trap_1_exp,
- am_erlang, am_atom_put("characters_to_list_trap_1",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_1"), 3,
&characters_to_list_trap_1);
erts_init_trap_export(&characters_to_list_trap_2_exp,
- am_erlang, am_atom_put("characters_to_list_trap_2",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_2"), 3,
&characters_to_list_trap_2);
erts_init_trap_export(&characters_to_list_trap_3_exp,
- am_erlang, am_atom_put("characters_to_list_trap_3",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_3"), 3,
&characters_to_list_trap_3);
erts_init_trap_export(&characters_to_list_trap_4_exp,
- am_erlang, am_atom_put("characters_to_list_trap_4",25), 1,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_4"), 1,
&characters_to_list_trap_4);
c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
@@ -123,19 +123,16 @@ static void cleanup_restart_context(RestartContext *rc)
}
}
-static void cleanup_restart_context_bin(Binary *bp)
+static int cleanup_restart_context_bin(Binary *bp)
{
RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
cleanup_restart_context(rc);
+ return 1;
}
-static RestartContext *get_rc_from_bin(Eterm bin)
+static RestartContext *get_rc_from_bin(Eterm mref)
{
- Binary *mbp;
- ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin));
-
- mbp = ((ProcBin *) binary_val(bin))->val;
-
+ Binary *mbp = erts_magic_ref2bin(mref);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
== cleanup_restart_context_bin);
return (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
@@ -147,9 +144,9 @@ static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc)
cleanup_restart_context_bin);
RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
Eterm *hp;
- memcpy(restartp,rc,sizeof(RestartContext));
- hp = HAlloc(p, PROC_BIN_SIZE);
- return erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ sys_memcpy(restartp,rc,sizeof(RestartContext));
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &MSO(p), mbp);
}
@@ -257,12 +254,12 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size,
ASSERT(need > 0);
ASSERT(from_source > 0);
if (size < from_source) {
- memcpy(leftover + (*num_leftovers), source, size);
+ sys_memcpy(leftover + (*num_leftovers), source, size);
*num_leftovers += size;
return 0;
}
/* leftover has room for four bytes (see bif) */
- memcpy(leftover + (*num_leftovers),source,from_source);
+ sys_memcpy(leftover + (*num_leftovers),source,from_source);
c = copy_utf8_bin(target, leftover, need, NULL, NULL, &tmp_err_pos, characters);
if (tmp_err_pos != 0) {
*err_pos = source;
@@ -294,7 +291,7 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size,
size -= 2; copied += 2;
} else if (((*source) & ((byte) 0xF0)) == 0xE0) {
if (leftover && size < 3) {
- memcpy(leftover, source, (int) size);
+ sys_memcpy(leftover, source, (int) size);
*num_leftovers = (int) size;
break;
}
@@ -316,7 +313,7 @@ static Uint copy_utf8_bin(byte *target, byte *source, Uint size,
size -= 3; copied += 3;
} else if (((*source) & ((byte) 0xF8)) == 0xF0) {
if (leftover && size < 4) {
- memcpy(leftover, source, (int) size);
+ sys_memcpy(leftover, source, (int) size);
*num_leftovers = (int) size;
break;
}
@@ -1161,14 +1158,15 @@ static ERTS_INLINE int
analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left,
Sint *num_latin1_chars, Uint max_chars)
{
+ int res = ERTS_UTF8_OK;
Uint latin1_count;
int is_latin1;
+ Uint nchars = 0;
*err_pos = source;
if (num_latin1_chars) {
is_latin1 = 1;
latin1_count = 0;
}
- *num_chars = 0;
while (size) {
if (((*source) & ((byte) 0x80)) == 0) {
source++;
@@ -1177,11 +1175,13 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left
latin1_count++;
} else if (((*source) & ((byte) 0xE0)) == 0xC0) {
if (size < 2) {
- return ERTS_UTF8_INCOMPLETE;
+ res = ERTS_UTF8_INCOMPLETE;
+ break;
}
if (((source[1] & ((byte) 0xC0)) != 0x80) ||
((*source) < 0xC2) /* overlong */) {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
if (num_latin1_chars) {
latin1_count++;
@@ -1192,16 +1192,19 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left
size -= 2;
} else if (((*source) & ((byte) 0xF0)) == 0xE0) {
if (size < 3) {
- return ERTS_UTF8_INCOMPLETE;
+ res = ERTS_UTF8_INCOMPLETE;
+ break;
}
if (((source[1] & ((byte) 0xC0)) != 0x80) ||
((source[2] & ((byte) 0xC0)) != 0x80) ||
(((*source) == 0xE0) && (source[1] < 0xA0)) /* overlong */ ) {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
if ((((*source) & ((byte) 0xF)) == 0xD) &&
((source[1] & 0x20) != 0)) {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
source += 3;
size -= 3;
@@ -1209,37 +1212,47 @@ analyze_utf8(byte *source, Uint size, byte **err_pos, Uint *num_chars, int *left
is_latin1 = 0;
} else if (((*source) & ((byte) 0xF8)) == 0xF0) {
if (size < 4) {
- return ERTS_UTF8_INCOMPLETE;
+ res = ERTS_UTF8_INCOMPLETE;
+ break;
}
if (((source[1] & ((byte) 0xC0)) != 0x80) ||
((source[2] & ((byte) 0xC0)) != 0x80) ||
((source[3] & ((byte) 0xC0)) != 0x80) ||
(((*source) == 0xF0) && (source[1] < 0x90)) /* overlong */) {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
if ((((*source) & ((byte)0x7)) > 0x4U) ||
((((*source) & ((byte)0x7)) == 0x4U) &&
((source[1] & ((byte)0x3F)) > 0xFU))) {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
source += 4;
size -= 4;
if (num_latin1_chars)
is_latin1 = 0;
} else {
- return ERTS_UTF8_ERROR;
+ res = ERTS_UTF8_ERROR;
+ break;
}
- ++(*num_chars);
+ ++nchars;
*err_pos = source;
- if (max_chars && size > 0 && *num_chars == max_chars)
- return ERTS_UTF8_OK_MAX_CHARS;
+ if (max_chars && size > 0 && nchars == max_chars) {
+ res = ERTS_UTF8_OK_MAX_CHARS;
+ break;
+ }
if (left && --(*left) <= 0 && size) {
- return ERTS_UTF8_ANALYZE_MORE;
+ res = ERTS_UTF8_ANALYZE_MORE;
+ break;
}
}
+
+ *num_chars = nchars;
if (num_latin1_chars)
*num_latin1_chars = is_latin1 ? latin1_count : -1;
- return ERTS_UTF8_OK;
+
+ return res;
}
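
The refactoring turns every early return into a break so that *num_chars is written back on all exits, including errors. For reference, the mask tests in the loop classify UTF-8 lead bytes as follows; a self-contained sketch:

    #include <stddef.h>

    /* Returns the sequence length announced by a UTF-8 lead byte, or 0 if
     * the byte cannot start a sequence (continuation byte or invalid). */
    static size_t utf8_seq_len(unsigned char b)
    {
        if ((b & 0x80) == 0x00) return 1;  /* 0xxxxxxx: 7-bit ASCII */
        if ((b & 0xE0) == 0xC0) return 2;  /* 110xxxxx              */
        if ((b & 0xF0) == 0xE0) return 3;  /* 1110xxxx              */
        if ((b & 0xF8) == 0xF0) return 4;  /* 11110xxx              */
        return 0;                          /* 10xxxxxx or invalid   */
    }
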
int erts_analyze_utf8(byte *source, Uint size,
@@ -1255,29 +1268,18 @@ int erts_analyze_utf8_x(byte *source, Uint size,
return analyze_utf8(source, size, err_pos, num_chars, left, num_latin1_chars, max_chars);
}
-/*
- * No errors should be able to occur - no overlongs, no malformed sequences, nothing
- */
-static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz,
- Uint left,
- Uint *num_built, Uint *num_eaten, Eterm tail)
+static ERTS_INLINE Eterm
+make_list_from_utf8_buf(Eterm **hpp, Uint num,
+ byte *bytes, Uint sz,
+ Uint *num_built, Uint *num_eaten,
+ Eterm tail)
{
Eterm *hp;
Eterm ret;
+ Uint left = num;
byte *source, *ssource;
Uint unipoint;
-
- ASSERT(num > 0);
- if (left < num) {
- if (left > 0)
- num = left;
- else
- num = 1;
- }
-
- *num_built = num; /* Always */
-
- hp = HAlloc(p,num * 2);
+ hp = *hpp;
ret = tail;
source = bytes + sz;
ssource = source;
@@ -1305,20 +1307,97 @@ static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz,
}
ret = CONS(hp,make_small(unipoint),ret);
hp += 2;
- if (--num <= 0) {
+ if (--left <= 0) {
break;
}
}
+ *hpp = hp;
+ *num_built = num; /* Always */
*num_eaten = (ssource - source);
return ret;
}
+/*
+ * No errors should be able to occur - no overlongs, no malformed sequences, nothing
+ */
+static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz,
+ Uint left,
+ Uint *num_built, Uint *num_eaten, Eterm tail)
+{
+ Eterm *hp;
+
+ ASSERT(num > 0);
+ if (left < num) {
+ if (left > 0)
+ num = left;
+ else
+ num = 1;
+ }
+
+ hp = HAlloc(p,num * 2);
+
+ return make_list_from_utf8_buf(&hp, num, bytes, sz,
+ num_built, num_eaten,
+ tail);
+}
Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left,
Uint *num_built, Uint *num_eaten, Eterm tail)
{
return do_utf8_to_list(p, num, bytes, sz, left, num_built, num_eaten, tail);
}
+Uint erts_atom_to_string_length(Eterm atom)
+{
+ Atom *ap;
+
+ ASSERT(is_atom(atom));
+ ap = atom_tab(atom_val(atom));
+
+ if (ap->latin1_chars >= 0)
+ return (Uint) ap->len;
+ else {
+ byte* err_pos;
+ Uint num_chars;
+#ifdef DEBUG
+ int ares =
+#endif
+ erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL);
+ ASSERT(ares == ERTS_UTF8_OK);
+
+ return num_chars;
+ }
+}
+
+Eterm erts_atom_to_string(Eterm **hpp, Eterm atom)
+{
+ Atom *ap;
+
+ ASSERT(is_atom(atom));
+ ap = atom_tab(atom_val(atom));
+ if (ap->latin1_chars >= 0)
+ return buf_to_intlist(hpp, (char*)ap->name, ap->len, NIL);
+ else {
+ Eterm res;
+ byte* err_pos;
+ Uint num_chars, num_built, num_eaten;
+#ifdef DEBUG
+ Eterm *hp_start = *hpp;
+ int ares =
+#endif
+ erts_analyze_utf8(ap->name, ap->len, &err_pos, &num_chars, NULL);
+ ASSERT(ares == ERTS_UTF8_OK);
+
+ res = make_list_from_utf8_buf(hpp, num_chars, ap->name, ap->len,
+ &num_built, &num_eaten, NIL);
+
+ ASSERT(num_built == num_chars);
+ ASSERT(num_eaten == ap->len);
+ ASSERT(*hpp - hp_start == 2*num_chars);
+
+ return res;
+ }
+}
+
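
The two new helpers are meant to be used as a pair: the length function sizes the heap at two words (one cons cell) per character, as the *hpp - hp_start == 2*num_chars assertion confirms, and erts_atom_to_string() then fills it. A hypothetical caller, not part of the patch:

    Eterm atom_to_charlist(Process *p, Eterm atom)
    {
        Uint n = erts_atom_to_string_length(atom);
        Eterm *hp = HAlloc(p, 2 * n);      /* 2 words per cons cell */
        return erts_atom_to_string(&hp, atom);
    }
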
static int is_candidate(Uint cp)
{
int index,pos;
@@ -1890,74 +1969,57 @@ binary_to_atom(Process* proc, Eterm bin, Eterm enc, int must_exist)
byte* bytes;
byte *temp_alloc = NULL;
Uint bin_size;
+ Eterm a;
if ((bytes = erts_get_aligned_binary_bytes(bin, &temp_alloc)) == 0) {
BIF_ERROR(proc, BADARG);
}
bin_size = binary_size(bin);
+
if (enc == am_latin1) {
- Eterm a;
- if (bin_size > MAX_ATOM_CHARACTERS) {
- system_limit:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, SYSTEM_LIMIT);
- }
if (!must_exist) {
- a = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_LATIN1,
- 0);
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(a))
- goto badarg;
- BIF_RET(a);
- } else if (erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_RET(a);
- } else {
+ int lix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_LATIN1,
+ 0);
+ if (lix == ATOM_BAD_ENCODING_ERROR) {
+ badarg:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, BADARG);
+ } else if (lix == ATOM_MAX_CHARS_ERROR) {
+ system_limit:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(proc, SYSTEM_LIMIT);
+ }
+
+ a = make_atom(lix);
+ } else if (!erts_atom_get((char *)bytes, bin_size, &a, ERTS_ATOM_ENC_LATIN1)) {
goto badarg;
}
- } else if (enc == am_utf8 || enc == am_unicode) {
- Eterm res;
- Uint num_chars = 0;
- const byte* p = bytes;
- Uint left = bin_size;
- while (left) {
- if (++num_chars > MAX_ATOM_CHARACTERS) {
+ } else if (enc == am_utf8 || enc == am_unicode) {
+ if (!must_exist) {
+ int uix = erts_atom_put_index((byte *) bytes,
+ bin_size,
+ ERTS_ATOM_ENC_UTF8,
+ 0);
+ if (uix == ATOM_BAD_ENCODING_ERROR) {
+ goto badarg;
+ } else if (uix == ATOM_MAX_CHARS_ERROR) {
goto system_limit;
}
- if ((p[0] & 0x80) == 0) {
- ++p;
- --left;
- }
- else if (left >= 2
- && (p[0] & 0xFE) == 0xC2 /* only allow latin1 subset */
- && (p[1] & 0xC0) == 0x80) {
- p += 2;
- left -= 2;
- }
- else goto badarg;
- }
- if (!must_exist) {
- res = erts_atom_put((byte *) bytes,
- bin_size,
- ERTS_ATOM_ENC_UTF8,
- 0);
+ a = make_atom(uix);
}
- else if (!erts_atom_get((char*)bytes, bin_size, &res, ERTS_ATOM_ENC_UTF8)) {
+ else if (!erts_atom_get((char*)bytes, bin_size, &a, ERTS_ATOM_ENC_UTF8)) {
goto badarg;
}
- erts_free_aligned_binary_bytes(temp_alloc);
- if (is_non_value(res))
- goto badarg;
- BIF_RET(res);
} else {
- badarg:
- erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(proc, BADARG);
+ goto badarg;
}
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
}
BIF_RETTYPE binary_to_atom_2(BIF_ALIST_2)
@@ -2008,7 +2070,7 @@ char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbu
is_list(name) ||
(allow_empty && is_nil(name))) {
Sint need;
- if ((need = erts_native_filename_need(name,encoding)) < 0) {
+ if ((need = erts_native_filename_need(name, encoding)) < 0) {
return NULL;
}
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -2046,7 +2108,7 @@ char *erts_convert_filename_to_encoding(Eterm name, char *statbuf, size_t statbu
} else {
name_buf = statbuf;
}
- memcpy(name_buf,bytes,size);
+ sys_memcpy(name_buf,bytes,size);
name_buf[size]=0;
} else {
name_buf = erts_convert_filename_to_wchar(bytes, size,
@@ -2103,18 +2165,9 @@ char* erts_convert_filename_to_wchar(byte* bytes, Uint size,
return name_buf;
}
-
-static int filename_len_16bit(byte *str)
-{
- byte *p = str;
- while(*p != '\0' || p[1] != '\0') {
- p += 2;
- }
- return (p - str);
-}
-Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
+Eterm erts_convert_native_to_filename(Process *p, size_t size, byte *bytes)
{
- Uint size,num_chars;
+ Uint num_chars;
Eterm *hp;
byte *err_pos;
Uint num_built; /* characters */
@@ -2128,7 +2181,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
case ERL_FILENAME_UTF8_MAC:
mac = 1;
case ERL_FILENAME_UTF8:
- size = strlen((char *) bytes);
if (size == 0)
return NIL;
if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) {
@@ -2143,7 +2195,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
}
return ret;
case ERL_FILENAME_WIN_WCHAR:
- size=filename_len_16bit(bytes);
if ((size % 2) != 0) { /* Panic fixup to avoid crashing the emulator */
size--;
hp = HAlloc(p, size+2);
@@ -2166,13 +2217,12 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes)
goto noconvert;
}
noconvert:
- size = strlen((char *) bytes);
hp = HAlloc(p, 2 * size);
return erts_bin_bytes_to_list(NIL, hp, bytes, size, 0);
}
-Sint erts_native_filename_need(Eterm ioterm, int encoding)
+Sint erts_native_filename_need(Eterm ioterm, int encoding)
{
Eterm *objp;
Eterm obj;
@@ -2214,6 +2264,20 @@ Sint erts_native_filename_need(Eterm ioterm, int encoding)
default:
need = -1;
}
+ /*
+ * Do not allow null in
+ * the middle of filenames
+ */
+ if (need > 0) {
+ byte *name = ap->name;
+ int len = ap->len;
+ for (i = 0; i < len; i++) {
+ if (name[i] == 0) {
+ need = -1;
+ break;
+ }
+ }
+ }
DESTROY_ESTACK(stack);
return need;
}
@@ -2244,6 +2308,14 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
if (is_small(obj)) { /* Always small */
for(;;) {
Uint x = unsigned_val(obj);
+ /*
+ * Do not allow null in
+ * the middle of filenames
+ */
+ if (x == 0) {
+ DESTROY_ESTACK(stack);
+ return ((Sint) -1);
+ }
switch (encoding) {
case ERL_FILENAME_LATIN1:
if (x > 255) {
@@ -2517,6 +2589,38 @@ void erts_copy_utf8_to_utf16_little(byte *target, byte *bytes, int num_chars)
}
/*
+ * *** Requirements on Raw Filename Format ***
+ *
+ * These requirements are due to the 'filename' module
+ * in stdlib, since it is documented to operate on raw
+ * filenames as well as ordinary filenames.
+ *
+ * A raw filename *must* be a byte sequence where:
+ * 1. Codepoints 0-127 (7-bit ascii) *must* be encoded
+ * as a byte with the corresponding value. That is,
+ * the most significant bit in the byte encoding the
+ * codepoint is never set.
+ * 2. Codepoints greater than 127 *must* be encoded
+ * with the most significant bit set in *every* byte
+ * encoding it.
+ *
+ * Latin1 and UTF-8 meet these requirements while
+ * UTF-16 and UTF-32 don't.
+ *
+ * On Windows, filenames are natively stored as malformed
+ * UTF-16LE (lone surrogates may appear). A more correct
+ * description than UTF-16 would be an array of 16-bit
+ * words... In order to meet the requirements of the
+ * raw filename format, we convert the malformed UTF-16LE to
+ * malformed UTF-8, which meets the requirements.
+ *
+ * Note that these requirements are today only OTP
+ * internal (erts-stdlib internal) requirements that
+ * could be changed.
+ */
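
One payoff of these two requirements, and the reason the 'filename' module can operate byte-wise: a byte with an ASCII value can never occur inside the encoding of a larger codepoint. A standalone sketch exploiting this (raw_basename is a hypothetical helper, not an OTP function):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    /* Find the last path separator in a raw filename. Safe for any
     * encoding meeting the requirements above, since a '/' byte can
     * never be part of a multi-byte sequence (those bytes all have
     * the most significant bit set). */
    static const unsigned char *
    raw_basename(const unsigned char *name, size_t len)
    {
        const unsigned char *base = name;
        size_t i;
        for (i = 0; i < len; i++) {
            if (name[i] == '/')
                base = &name[i] + 1;
        }
        return base;
    }

    int main(void)
    {
        const unsigned char path[] = "usr/bin/erl";
        printf("%s\n",
               (const char *)raw_basename(path, strlen((const char *)path)));
        return 0;
    }
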
+
+/*
* This internal bif converts a filename to whatever format is suitable for the file driver
* It also adds zero termination so that prim_file needn't bother with the character encoding
* of the file driver
@@ -2527,6 +2631,12 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
Sint need;
Eterm bin_term;
byte* bin_p;
+
+ /*
+ * See comment on "Requirements on Raw Filename Format"
+ * above.
+ */
+
/* Prim file explicitly does not allow atoms, although we could
very well cope with it. Instead of letting 'file' handle them,
it would probably be more efficient to handle them here. Subject to
@@ -2544,10 +2654,16 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
size = binary_size(BIF_ARG_1);
bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc);
if (encoding != ERL_FILENAME_WIN_WCHAR) {
+ Uint i;
/*Add 0 termination only*/
bin_term = new_binary(BIF_P, NULL, size+1);
bin_p = binary_bytes(bin_term);
- memcpy(bin_p,bytes,size);
+ for (i = 0; i < size; i++) {
+ /* Don't allow null in the middle of filenames... */
+ if (bytes[i] == 0)
+ goto bin_name_error;
+ bin_p[i] = bytes[i];
+ }
bin_p[size]=0;
erts_free_aligned_binary_bytes(temp_alloc);
BIF_RET(bin_term);
@@ -2561,6 +2677,9 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
bin_term = new_binary(BIF_P, 0, (size+1)*2);
bin_p = binary_bytes(bin_term);
while (size--) {
+ /* Don't allow null in the middle of filenames... */
+ if (*bytes == 0)
+ goto bin_name_error;
*bin_p++ = *bytes++;
*bin_p++ = 0;
}
@@ -2578,11 +2697,14 @@ BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
bin_p[num_chars*2+1] = 0;
erts_free_aligned_binary_bytes(temp_alloc);
BIF_RET(bin_term);
+ bin_name_error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P,BADARG);
} /* binary */
- if ((need = erts_native_filename_need(BIF_ARG_1,encoding)) < 0) {
- BIF_ERROR(BIF_P,BADARG);
+ if ((need = erts_native_filename_need(BIF_ARG_1, encoding)) < 0) {
+ BIF_ERROR(BIF_P,BADARG);
}
if (encoding == ERL_FILENAME_WIN_WCHAR) {
need += 2;
@@ -2616,6 +2738,11 @@ BIF_RETTYPE prim_file_internal_native2name_1(BIF_ALIST_1)
Eterm ret;
int mac = 0;
+ /*
+ * See comment on "Requirements on Raw Filename Format"
+ * above.
+ */
+
if (is_not_binary(BIF_ARG_1)) {
BIF_ERROR(BIF_P,BADARG);
}
diff --git a/erts/emulator/beam/erl_unicode.h b/erts/emulator/beam/erl_unicode.h
index e01eaa787e..31369fc8f9 100644
--- a/erts/emulator/beam/erl_unicode.h
+++ b/erts/emulator/beam/erl_unicode.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,4 +21,7 @@
#ifndef _ERL_UNICODE_H
#define _ERL_UNICODE_H
+Uint erts_atom_to_string_length(Eterm atom);
+Eterm erts_atom_to_string(Eterm **hpp, Eterm atom);
+
#endif /* _ERL_UNICODE_H */
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 81800752f0..880febba8b 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2012-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2012-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,15 +22,12 @@
#define ERL_UTILS_H__
#include "sys.h"
-#include "erl_smp.h"
+#include "atom.h"
#include "erl_printf.h"
struct process;
typedef struct {
-#ifdef DEBUG
- int smp_api;
-#endif
union {
Uint64 not_atomic;
erts_atomic64_t atomic;
@@ -38,70 +35,25 @@ typedef struct {
} erts_interval_t;
void erts_interval_init(erts_interval_t *);
-void erts_smp_interval_init(erts_interval_t *);
Uint64 erts_step_interval_nob(erts_interval_t *);
Uint64 erts_step_interval_relb(erts_interval_t *);
-Uint64 erts_smp_step_interval_nob(erts_interval_t *);
-Uint64 erts_smp_step_interval_relb(erts_interval_t *);
Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE Uint64
-erts_current_interval_nob__(erts_interval_t *icp)
-{
- return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb__(erts_interval_t *icp)
-{
- return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
-}
-
-ERTS_GLB_INLINE Uint64
erts_current_interval_nob(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
- return erts_current_interval_nob__(icp);
+ return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic);
}
ERTS_GLB_INLINE Uint64
erts_current_interval_acqb(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
- return erts_current_interval_acqb__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_nob__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_acqb__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
+ return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic);
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -117,11 +69,10 @@ int erts_fit_in_bits_int32(Sint32);
int erts_fit_in_bits_uint(Uint);
Sint erts_list_length(Eterm);
int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
Uint32 block_hash(byte *, unsigned, Uint32);
Uint32 make_hash2(Eterm);
Uint32 make_hash(Eterm);
-Uint32 make_internal_hash(Eterm);
+Uint32 make_internal_hash(Eterm, Uint32 salt);
void erts_save_emu_args(int argc, char **argv);
Eterm erts_get_emu_args(struct process *c_p);
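
make_internal_hash() now takes a salt argument. A standalone sketch (not OTP's actual hash function) of the general technique the new signature enables -- mixing a caller-supplied salt into the input so different users of the hash see different collision patterns:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    static uint32_t salted_hash(uint32_t key, uint32_t salt)
    {
        uint32_t h = key ^ salt;   /* perturb the input with the salt */
        h *= 0x9e3779b1u;          /* multiplicative scrambling */
        h ^= h >> 16;              /* fold high bits down */
        return h;
    }

    int main(void)
    {
        /* same key, different salts -> unrelated hash values */
        printf("%08" PRIx32 " %08" PRIx32 "\n",
               salted_hash(42, 0), salted_hash(42, 0xbeef));
        return 0;
    }
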
@@ -132,6 +83,7 @@ Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+#define erts_bld_monotonic_time erts_bld_sint64
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
@@ -140,7 +92,7 @@ Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple5(H,S,E1,E2,E3,E4,E5) erts_bld_tuple(H,S,5,E1,E2,E3,E4,E5)
Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
-#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
+#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,sys_strlen(str))
Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
Sint length, Eterm terms1[], Uint terms2[]);
@@ -161,10 +113,12 @@ int eq(Eterm, Eterm);
#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-int erts_cmp_atoms(Eterm a, Eterm b);
-Sint erts_cmp(Eterm, Eterm, int, int);
-Sint erts_cmp_compound(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE Sint erts_cmp(Eterm, Eterm, int, int);
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b);
+
Sint cmp(Eterm a, Eterm b);
+Sint erts_cmp_compound(Eterm, Eterm, int, int);
+
#define CMP(A,B) erts_cmp(A,B,0,0)
#define CMP_TERM(A,B) erts_cmp(A,B,1,0)
#define CMP_EQ_ONLY(A,B) erts_cmp(A,B,0,1)
@@ -199,4 +153,56 @@ Sint cmp(Eterm a, Eterm b);
if (erts_cmp_compound(X,Y,0,EqOnly) Op 0) { Action; }; \
}
+#define erts_float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int erts_cmp_atoms(Eterm a, Eterm b) {
+ Atom *aa = atom_tab(atom_val(a));
+ Atom *bb = atom_tab(atom_val(b));
+
+ byte *name_a, *name_b;
+ int len_a, len_b, diff;
+
+ diff = aa->ord0 - bb->ord0;
+
+ if (diff != 0) {
+ return diff;
+ }
+
+ name_a = &aa->name[3];
+ name_b = &bb->name[3];
+ len_a = aa->len-3;
+ len_b = bb->len-3;
+
+ if (len_a > 0 && len_b > 0) {
+ diff = sys_memcmp(name_a, name_b, MIN(len_a, len_b));
+
+ if (diff != 0) {
+ return diff;
+ }
+ }
+
+ return len_a - len_b;
+}
+
+ERTS_GLB_INLINE Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only) {
+ if (is_atom(a) && is_atom(b)) {
+ return erts_cmp_atoms(a, b);
+ } else if (is_both_small(a, b)) {
+ return (signed_val(a) - signed_val(b));
+ } else if (is_float(a) && is_float(b)) {
+ FloatDef af, bf;
+
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+
+ return erts_float_comp(af.fd, bf.fd);
+ }
+
+ return erts_cmp_compound(a,b,exact,eq_only);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif
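
erts_cmp_atoms() above orders atoms by a precomputed key for the first three name bytes (ord0) and only touches the remaining bytes and lengths on a tie. A standalone sketch of the same scheme (struct name and cmp_names are hypothetical illustrations, assuming ord0 alone orders names of up to three bytes):

    #include <string.h>

    struct name {
        int ord0;                  /* order key for the first 3 bytes */
        const unsigned char *bytes;
        int len;
    };

    static int cmp_names(const struct name *a, const struct name *b)
    {
        int la = a->len - 3, lb = b->len - 3;
        int diff = a->ord0 - b->ord0;
        if (diff != 0)
            return diff;
        if (la > 0 && lb > 0) {
            diff = memcmp(a->bytes + 3, b->bytes + 3, la < lb ? la : lb);
            if (diff != 0)
                return diff;
        }
        return la - lb;            /* shorter name sorts first */
    }

    int main(void)
    {
        static const unsigned char x[] = "apple", y[] = "apples";
        struct name a = { 0, x, 5 }, b = { 0, y, 6 }; /* same ord0 for demo */
        return cmp_names(&a, &b) < 0 ? 0 : 1;         /* "apple" < "apples" */
    }
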
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index f97716d030..4089fac48e 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,7 +36,7 @@
#define EMULATOR "BEAM"
#define SEQ_TRACE 1
-#define CONTEXT_REDS 2000 /* Swap process out after this number */
+#define CONTEXT_REDS 4000 /* Swap process out after this number */
#define MAX_ARG 255 /* Max number of arguments allowed */
#define MAX_REG 1024 /* Max number of x(N) registers used */
@@ -46,8 +46,6 @@
*/
#define ERTS_X_REGS_ALLOCATED (MAX_REG+3)
-#define INPUT_REDUCTIONS (2 * CONTEXT_REDS)
-
#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
#define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */
#define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */
@@ -55,7 +53,7 @@
#define CP_SIZE 1
#define ErtsHAllocLockCheck(P) \
- ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
+ ERTS_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
#ifdef DEBUG
@@ -71,7 +69,7 @@
# ifdef CHECK_FOR_HOLES
# define INIT_HEAP_MEM(p,sz) erts_set_hole_marker(HEAP_TOP(p), (sz))
# else
-# define INIT_HEAP_MEM(p,sz) memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*))
+# define INIT_HEAP_MEM(p,sz) sys_memset(HEAP_TOP(p),0x01,(sz)*sizeof(Eterm*))
# endif
#else
# define INIT_HEAP_MEM(p,sz) ((void)0)
@@ -102,16 +100,31 @@
if ((ptr) == (endp)) { \
; \
} else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \
+ ASSERT(HEAP_TOP(p) == (endp)); \
HEAP_TOP(p) = (ptr); \
} else { \
- erts_heap_frag_shrink(p, ptr); \
+ ASSERT(MBUF(p)->mem + MBUF(p)->used_size == (endp)); \
+ erts_heap_frag_shrink(p, ptr); \
}
#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
-# define ERTS_HOLE_MARKER (((0xdeadbeef << 24) << 8) | 0xdeadbeef)
-#endif /* egil: 32-bit ? */
+
+/*
+ * ERTS_HOLE_MARKER must *not* be mistaken for a valid term
+ * on the heap...
+ */
+# ifdef ARCH_64
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdeadbeaf00000000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdeadbeaf0000001b */
+# else
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdead0000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdead001b */
+# endif
+#endif
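
A standalone sketch (not OTP code) of the hole-poisoning technique the marker supports: debug builds fill unused heap words with a value that cannot be a valid tagged term, so any scan that walks into a hole fails fast instead of misinterpreting garbage.

    #include <assert.h>
    #include <stdint.h>

    #define HOLE UINT64_C(0xdeadbeaf0000001b)  /* value from the comment above */

    static void fill_hole(uint64_t *heap, int from, int to)
    {
        int i;
        for (i = from; i < to; i++)
            heap[i] = HOLE;        /* poison every unused word */
    }

    int main(void)
    {
        uint64_t heap[8] = { 0 };
        fill_hole(heap, 2, 6);
        assert(heap[3] == HOLE);   /* a scanner can now detect the hole */
        return 0;
    }
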
/*
* Allocate heap memory on the ordinary heap, NEVER in a heap
@@ -144,13 +157,15 @@ typedef struct op_entry {
Uint32 mask[3]; /* Signature mask. */
unsigned involves_r; /* Needs special attention when matching. */
int sz; /* Number of loaded words. */
+ int adjust; /* Adjustment for start of instruction. */
char* pack; /* Instructions for packing engine. */
char* sign; /* Signature string. */
- unsigned count; /* Number of times executed. */
} OpEntry;
-extern OpEntry opc[]; /* Description of all instructions. */
-extern int num_instructions; /* Number of instruction in opc[]. */
+extern const OpEntry opc[]; /* Description of all instructions. */
+extern const int num_instructions; /* Number of instructions in opc[]. */
+
+extern Uint erts_instr_count[];
/* some constants for various table sizes etc */
@@ -185,4 +200,24 @@ extern int erts_pd_initial_size;/* Initial Process dictionary table size */
#include "erl_term.h"
+#if defined(NO_JUMP_TABLE)
+# define BeamOpsAreInitialized() (1)
+# define BeamOpCodeAddr(OpCode) ((BeamInstr)(OpCode))
+#else
+extern void** beam_ops;
+# define BeamOpsAreInitialized() (beam_ops != 0)
+# define BeamOpCodeAddr(OpCode) ((BeamInstr)beam_ops[(OpCode)])
+#endif
+
+#if defined(ARCH_64) && defined(CODE_MODEL_SMALL)
+# define BeamCodeAddr(InstrWord) ((BeamInstr)(Uint32)(InstrWord))
+# define BeamSetCodeAddr(InstrWord, Addr) (((InstrWord) & ~((1ull << 32)-1)) | (Addr))
+# define BeamExtraData(InstrWord) ((InstrWord) >> 32)
+#else
+# define BeamCodeAddr(InstrWord) ((BeamInstr)(InstrWord))
+# define BeamSetCodeAddr(InstrWord, Addr) (Addr)
+#endif
+
+#define BeamIsOpCode(InstrWord, OpCode) (BeamCodeAddr(InstrWord) == BeamOpCodeAddr(OpCode))
+
#endif /* __ERL_VM_H__ */
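
BeamOpCodeAddr() and BeamIsOpCode() reflect that, with a jump table, a loaded opcode is the address of its handler. A minimal standalone sketch of that direct-threading idea (uses the GCC/Clang computed-goto extension; not actual BEAM loader code):

    #include <stdio.h>

    int main(void)
    {
        /* "Opcode table": each opcode is the address of its handler. */
        void *ops[3];
        void *code[4];
        void **ip;
        int acc = 0;

        ops[0] = &&op_incr; ops[1] = &&op_print; ops[2] = &&op_halt;

        /* A tiny "loaded program": incr, incr, print, halt. */
        code[0] = ops[0]; code[1] = ops[0]; code[2] = ops[1]; code[3] = ops[2];

        ip = code;
        goto *(*ip++);             /* dispatch = one indirect jump */

    op_incr:  acc++;               goto *(*ip++);
    op_print: printf("%d\n", acc); goto *(*ip++);
    op_halt:  return 0;
    }
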
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index 237889e0f5..8792138d53 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2016.
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2018.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,7 +55,8 @@ provider erlang {
* @param sender the PID (string form) of the sender
* @param receiver the PID (string form) of the receiver
* @param size the size of the message being delivered (words)
- * @param token_label for the sender's sequential trace token
+ * @param token_label for the sender's sequential trace token. This will be
+ * INT_MIN if the label does not fit into a 32-bit integer.
* @param token_previous count for the sender's sequential trace token
* @param token_current count for the sender's sequential trace token
*/
@@ -73,7 +74,8 @@ provider erlang {
* @param node_name the Erlang node name (string form) of the receiver
* @param receiver the PID/name (string form) of the receiver
* @param size the size of the message being delivered (words)
- * @param token_label for the sender's sequential trace token
+ * @param token_label for the sender's sequential trace token. This will be
+ * INT_MIN if the label does not fit into a 32-bit integer.
* @param token_previous count for the sender's sequential trace token
* @param token_current count for the sender's sequential trace token
*/
@@ -98,7 +100,8 @@ provider erlang {
* @param receiver the PID (string form) of the receiver
* @param size the size of the message being delivered (words)
* @param queue_len length of the queue of the receiving process
- * @param token_label for the sender's sequential trace token
+ * @param token_label for the sender's sequential trace token. This will be
+ * INT_MIN if the label does not fit into a 32-bit integer.
* @param token_previous count for the sender's sequential trace token
* @param token_current count for the sender's sequential trace token
*/
@@ -122,7 +125,8 @@ provider erlang {
* @param receiver the PID (string form) of the receiver
* @param size the size of the message being delivered (words)
* @param queue_len length of the queue of the receiving process
- * @param token_label for the sender's sequential trace token
+ * @param token_label for the sender's sequential trace token. This will be
+ * INT_MIN if the label does not fit into a 32-bit integer.
* @param token_previous count for the sender's sequential trace token
* @param token_current count for the sender's sequential trace token
*/
@@ -273,7 +277,8 @@ provider erlang {
* @param node_name the Erlang node name (string form) of the receiver
* @param receiver the PID (string form) of the process receiving EXIT signal
* @param reason the reason for the exit (may be truncated)
- * @param token_label for the sender's sequential trace token
+ * @param token_label for the sender's sequential trace token. This will be
+ * INT_MIN if the label does not fit into a 32-bit integer.
* @param token_previous count for the sender's sequential trace token
* @param token_current count for the sender's sequential trace token
*/
@@ -634,72 +639,6 @@ provider erlang {
*/
probe aio_pool__get(char *, int);
- /* Probes for efile_drv.c */
-
- /**
- * Entry into the efile_drv.c file I/O driver
- *
- * For a list of command numbers used by this driver, see the section
- * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md.
- * That section also contains explanation of the various integer and
- * string arguments that may be present when any particular probe fires.
- *
- * NOTE: Not all Linux platforms (using SystemTap) can support
- * arguments beyond arg9.
- *
- *
- * TODO: Adding the port string, args[10], is a pain. Making that
- * port string available to all the other efile_drv.c probes
- * will be more pain. Is the pain worth it? If yes, then
- * add them everywhere else and grit our teeth. If no, then
- * rip it out.
- *
- * @param thread-id number of the scheduler Pthread arg0
- * @param tag number: {thread-id, tag} uniquely names a driver operation
- * @param user-tag string arg2
- * @param command number arg3
- * @param string argument 1 arg4
- * @param string argument 2 arg5
- * @param integer argument 1 arg6
- * @param integer argument 2 arg7
- * @param integer argument 3 arg8
- * @param integer argument 4 arg9
- * @param port the port ID of the busy port args[10]
- */
- probe efile_drv__entry(int, int, char *, int, char *, char *,
- int64_t, int64_t, int64_t, int64_t, char *);
-
- /**
- * Entry into the driver's internal work function. Computation here
- * is performed by a async worker pool Pthread.
- *
- * @param thread-id number
- * @param tag number
- * @param command number
- */
- probe efile_drv__int_entry(int, int, int);
-
- /**
- * Return from the driver's internal work function.
- *
- * @param thread-id number
- * @param tag number
- * @param command number
- */
- probe efile_drv__int_return(int, int, int);
-
- /**
- * Return from the efile_drv.c file I/O driver
- *
- * @param thread-id number arg0
- * @param tag number arg1
- * @param user-tag string arg2
- * @param command number arg3
- * @param Success? 1 is success, 0 is failure arg4
- * @param If failure, the errno of the error. arg5
- */
- probe efile_drv__return(int, int, char *, int, int, int);
-
/*
* The set of probes called by the erlang tracer nif backend. In order
diff --git a/erts/emulator/beam/erlang_lttng.h b/erts/emulator/beam/erlang_lttng.h
index 4e869671f7..9b93d77f6e 100644
--- a/erts/emulator/beam/erlang_lttng.h
+++ b/erts/emulator/beam/erlang_lttng.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -159,21 +159,6 @@ TRACEPOINT_EVENT(
TRACEPOINT_EVENT(
org_erlang_otp,
- driver_event,
- TP_ARGS(
- char*, pid,
- char*, port,
- char*, driver
- ),
- TP_FIELDS(
- ctf_string(pid, pid)
- ctf_string(port, port)
- ctf_string(driver, driver)
- )
-)
-
-TRACEPOINT_EVENT(
- org_erlang_otp,
driver_timeout,
TP_ARGS(
char*, pid,
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
index 6c33b12dd0..64c08b1570 100644
--- a/erts/emulator/beam/error.h
+++ b/erts/emulator/beam/error.h
@@ -21,6 +21,8 @@
#ifndef __ERROR_H__
#define __ERROR_H__
+#include "code_ix.h"
+
/*
* There are three primary exception classes:
*
@@ -37,14 +39,11 @@
*/
/*
- * Bits 0-1 index the 'exception class tag' table.
- */
-#define EXC_CLASSBITS 3
-#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
-
-/*
* Exception class tags (indices into the 'exception_tag' array)
*/
+#define EXTAG_OFFSET 0
+#define EXTAG_BITS 2
+
#define EXTAG_ERROR 0
#define EXTAG_EXIT 1
#define EXTAG_THROWN 2
@@ -52,20 +51,31 @@
#define NUMBER_EXC_TAGS 3 /* The number of exception class tags */
/*
- * Exit code flags (bits 2-7)
+ * Index to the 'exception class tag' table.
+ */
+#define EXC_CLASSBITS ((1<<EXTAG_BITS)-1)
+#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
+
+/*
+ * Exit code flags
*
* These flags make it easier and quicker to decide what to do with the
* exception in the early stages, before a handler is found, and also
* maintains some separation between the class tag and the actions.
*/
-#define EXF_PANIC (1<<2) /* ignore catches */
-#define EXF_THROWN (1<<3) /* nonlocal return */
-#define EXF_LOG (1<<4) /* write to logger on termination */
-#define EXF_NATIVE (1<<5) /* occurred in native code */
-#define EXF_SAVETRACE (1<<6) /* save stack trace in internal form */
-#define EXF_ARGLIST (1<<7) /* has arglist for top of trace */
+#define EXF_OFFSET EXTAG_BITS
+#define EXF_BITS 7
-#define EXC_FLAGBITS 0x00fc
+#define EXF_PANIC (1<<(0+EXF_OFFSET)) /* ignore catches */
+#define EXF_THROWN (1<<(1+EXF_OFFSET)) /* nonlocal return */
+#define EXF_LOG (1<<(2+EXF_OFFSET)) /* write to logger on termination */
+#define EXF_NATIVE (1<<(3+EXF_OFFSET)) /* occurred in native code */
+#define EXF_SAVETRACE (1<<(4+EXF_OFFSET)) /* save stack trace in internal form */
+#define EXF_ARGLIST (1<<(5+EXF_OFFSET)) /* has arglist for top of trace */
+#define EXF_RESTORE_NIF (1<<(6+EXF_OFFSET)) /* restore original bif/nif */
+
+#define EXC_FLAGBITS (((1<<(EXF_BITS+EXF_OFFSET))-1) \
+ & ~((1<<(EXF_OFFSET))-1))
/*
* The primary fields of an exception code
@@ -75,11 +85,16 @@
#define NATIVE_EXCEPTION(x) ((x) | EXF_NATIVE)
/*
- * Bits 8-12 of the error code are used for indexing into
+ * Error code used for indexing into
* the short-hand error descriptor table.
*/
-#define EXC_INDEXBITS 0x1f00
-#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> 8)
+#define EXC_OFFSET (EXF_OFFSET+EXF_BITS)
+#define EXC_BITS 5
+
+#define EXC_INDEXBITS (((1<<(EXC_BITS+EXC_OFFSET))-1) \
+ & ~((1<<(EXC_OFFSET))-1))
+
+#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> EXC_OFFSET)
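
A standalone sanity sketch of the new layout (constants copied from the definitions above; not OTP code): the class tag lives in the low EXTAG_BITS, the flags in the next EXF_BITS, and the error index in the EXC_BITS above those.

    #include <assert.h>

    #define EXTAG_BITS 2
    #define EXF_OFFSET EXTAG_BITS
    #define EXF_BITS   7
    #define EXC_OFFSET (EXF_OFFSET + EXF_BITS)
    #define EXC_BITS   5

    int main(void)
    {
        unsigned code = (3u << EXC_OFFSET)    /* error index 3 */
                      | (1u << EXF_OFFSET)    /* one flag bit */
                      | 0u;                   /* class tag: error */
        assert(((code >> EXC_OFFSET) & ((1u << EXC_BITS) - 1)) == 3);
        assert((code & ((1u << EXTAG_BITS) - 1)) == 0);
        return 0;
    }
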
/*
* Exit codes used for raising a fresh exception. The primary exceptions
@@ -105,46 +120,46 @@
/* Error with given arglist term
* (exit reason in p->fvalue) */
-#define EXC_NORMAL ((1 << 8) | EXC_EXIT)
+#define EXC_NORMAL ((1 << EXC_OFFSET) | EXC_EXIT)
/* Normal exit (reason 'normal') */
-#define EXC_INTERNAL_ERROR ((2 << 8) | EXC_ERROR | EXF_PANIC)
+#define EXC_INTERNAL_ERROR ((2 << EXC_OFFSET) | EXC_ERROR | EXF_PANIC)
/* Things that shouldn't happen */
-#define EXC_BADARG ((3 << 8) | EXC_ERROR)
+#define EXC_BADARG ((3 << EXC_OFFSET) | EXC_ERROR)
/* Bad argument to a BIF */
-#define EXC_BADARITH ((4 << 8) | EXC_ERROR)
+#define EXC_BADARITH ((4 << EXC_OFFSET) | EXC_ERROR)
/* Bad arithmetic */
-#define EXC_BADMATCH ((5 << 8) | EXC_ERROR)
+#define EXC_BADMATCH ((5 << EXC_OFFSET) | EXC_ERROR)
/* Bad match in function body */
-#define EXC_FUNCTION_CLAUSE ((6 << 8) | EXC_ERROR)
+#define EXC_FUNCTION_CLAUSE ((6 << EXC_OFFSET) | EXC_ERROR)
/* No matching function head */
-#define EXC_CASE_CLAUSE ((7 << 8) | EXC_ERROR)
+#define EXC_CASE_CLAUSE ((7 << EXC_OFFSET) | EXC_ERROR)
/* No matching case clause */
-#define EXC_IF_CLAUSE ((8 << 8) | EXC_ERROR)
+#define EXC_IF_CLAUSE ((8 << EXC_OFFSET) | EXC_ERROR)
/* No matching if clause */
-#define EXC_UNDEF ((9 << 8) | EXC_ERROR)
+#define EXC_UNDEF ((9 << EXC_OFFSET) | EXC_ERROR)
/* No function of that arity matches */
-#define EXC_BADFUN ((10 << 8) | EXC_ERROR)
+#define EXC_BADFUN ((10 << EXC_OFFSET) | EXC_ERROR)
/* Not an existing fun */
-#define EXC_BADARITY ((11 << 8) | EXC_ERROR)
+#define EXC_BADARITY ((11 << EXC_OFFSET) | EXC_ERROR)
/* Attempt to call fun with
* wrong number of arguments. */
-#define EXC_TIMEOUT_VALUE ((12 << 8) | EXC_ERROR)
+#define EXC_TIMEOUT_VALUE ((12 << EXC_OFFSET) | EXC_ERROR)
/* Bad time out value */
-#define EXC_NOPROC ((13 << 8) | EXC_ERROR)
+#define EXC_NOPROC ((13 << EXC_OFFSET) | EXC_ERROR)
/* No process or port */
-#define EXC_NOTALIVE ((14 << 8) | EXC_ERROR)
+#define EXC_NOTALIVE ((14 << EXC_OFFSET) | EXC_ERROR)
/* Not distributed */
-#define EXC_SYSTEM_LIMIT ((15 << 8) | EXC_ERROR)
+#define EXC_SYSTEM_LIMIT ((15 << EXC_OFFSET) | EXC_ERROR)
/* Ran out of something */
-#define EXC_TRY_CLAUSE ((16 << 8) | EXC_ERROR)
+#define EXC_TRY_CLAUSE ((16 << EXC_OFFSET) | EXC_ERROR)
/* No matching try clause */
-#define EXC_NOTSUP ((17 << 8) | EXC_ERROR)
+#define EXC_NOTSUP ((17 << EXC_OFFSET) | EXC_ERROR)
/* Not supported */
-#define EXC_BADMAP ((18 << 8) | EXC_ERROR)
+#define EXC_BADMAP ((18 << EXC_OFFSET) | EXC_ERROR)
/* Bad map */
-#define EXC_BADKEY ((19 << 8) | EXC_ERROR)
+#define EXC_BADKEY ((19 << EXC_OFFSET) | EXC_ERROR)
/* Bad key in map */
#define NUMBER_EXIT_CODES 20 /* The number of exit code indices */
@@ -152,7 +167,7 @@
/*
* Internal pseudo-error codes.
*/
-#define TRAP (1 << 8) /* BIF Trap to erlang code */
+#define TRAP (1 << EXC_OFFSET) /* BIF Trap to erlang code */
/*
* Aliases for some common exit codes.
@@ -197,7 +212,7 @@ struct StackTrace {
Eterm header; /* bignum header - must be first in struct */
Eterm freason; /* original exception reason is saved in the struct */
BeamInstr* pc;
- BeamInstr* current;
+ ErtsCodeMFA* current;
int depth; /* number of saved pointers in trace[] */
BeamInstr *trace[1]; /* varying size - must be last in struct */
};
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 2a19211987..946ffeffb8 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -41,16 +41,13 @@
static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */
-static erts_smp_atomic_t total_entries_bytes;
-
-#include "erl_smp.h"
+static erts_atomic_t total_entries_bytes;
/* This lock protects the staging export table from concurrent access
* AND it protects the staging table from becoming active.
*/
-erts_smp_mtx_t export_staging_lock;
+erts_mtx_t export_staging_lock;
-extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
struct export_entry
@@ -83,19 +80,15 @@ static struct export_blob* entry_to_blob(struct export_entry* ee)
}
void
-export_info(int to, void *to_arg)
+export_info(fmtfn_t to, void *to_arg)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
export_staging_lock();
-#endif
index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
-#ifdef ERTS_SMP
if (lock)
export_staging_unlock();
-#endif
}
@@ -103,7 +96,8 @@ static HashValue
export_hash(struct export_entry* ee)
{
Export* x = ee->ep;
- return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
+ return EXPORT_HASH(x->info.mfa.module, x->info.mfa.function,
+ x->info.mfa.arity);
}
static int
@@ -111,9 +105,9 @@ export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
Export* tmpl = tmpl_e->ep;
Export* obj = obj_e->ep;
- return !(tmpl->code[0] == obj->code[0] &&
- tmpl->code[1] == obj->code[1] &&
- tmpl->code[2] == obj->code[2]);
+ return !(tmpl->info.mfa.module == obj->info.mfa.module &&
+ tmpl->info.mfa.function == obj->info.mfa.function &&
+ tmpl->info.mfa.arity == obj->info.mfa.arity);
}
@@ -128,23 +122,28 @@ export_alloc(struct export_entry* tmpl_e)
Export* obj;
blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
- erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
+ erts_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
obj = &blob->exp;
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
+ obj->info.op = 0;
+ obj->info.u.gen_bp = NULL;
+ obj->info.mfa.module = tmpl->info.mfa.module;
+ obj->info.mfa.function = tmpl->info.mfa.function;
+ obj->info.mfa.arity = tmpl->info.mfa.arity;
+ obj->beam[0] = 0;
+ if (BeamOpsAreInitialized()) {
+ obj->beam[0] = BeamOpCodeAddr(op_call_error_handler);
+ }
+ obj->beam[1] = 0;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
- obj->addressv[ix] = obj->code+3;
+ obj->addressv[ix] = obj->beam;
blob->entryv[ix].slot.index = -1;
blob->entryv[ix].ep = &blob->exp;
}
ix = 0;
+
+ DBG_TRACE_MFA_P(&obj->info.mfa, "export allocation at %p", obj);
}
else { /* Existing entry in another table, use free entry in blob */
blob = entry_to_blob(tmpl_e);
@@ -163,11 +162,14 @@ export_free(struct export_entry* obj)
obj->slot.index = -1;
for (i=0; i < ERTS_NUM_CODE_IX; i++) {
if (blob->entryv[i].slot.index >= 0) {
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export entry slot %u freed for %p",
+ (obj - blob->entryv), &blob->exp);
return;
}
}
+ DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp);
erts_free(ERTS_ALC_T_EXPORT, blob);
- erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
+ erts_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
void
@@ -176,8 +178,9 @@ init_export_table(void)
HashFunctions f;
int i;
- erts_smp_mtx_init(&export_staging_lock, "export_tab");
- erts_smp_atomic_init_nob(&total_entries_bytes, 0);
+ erts_mtx_init(&export_staging_lock, "export_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
@@ -224,7 +227,9 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
while (b != (HashBucket*) 0) {
Export* ep = ((struct export_entry*) b)->ep;
- if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
+ if (ep->info.mfa.module == m &&
+ ep->info.mfa.function == f &&
+ ep->info.mfa.arity == a) {
return ep;
}
b = b->next;
@@ -237,9 +242,9 @@ static struct export_entry* init_template(struct export_templ* templ,
{
templ->entry.ep = &templ->exp;
templ->entry.slot.index = -1;
- templ->exp.code[0] = m;
- templ->exp.code[1] = f;
- templ->exp.code[2] = a;
+ templ->exp.info.mfa.module = m;
+ templ->exp.info.mfa.function = f;
+ templ->exp.info.mfa.arity = a;
return &templ->entry;
}
@@ -263,8 +268,8 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
if (ee == NULL ||
- (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ (ee->ep->addressv[code_ix] == ee->ep->beam &&
+ ! BeamIsOpCode(ee->ep->beam[0], op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
@@ -348,7 +353,7 @@ Export *export_list(int i, ErtsCodeIndex code_ix)
int export_list_size(ErtsCodeIndex code_ix)
{
- return export_tables[code_ix].entries;
+ return erts_index_num_entries(&export_tables[code_ix]);
}
int export_table_sz(void)
@@ -364,7 +369,7 @@ int export_table_sz(void)
}
int export_entries_sz(void)
{
- return erts_smp_atomic_read_nob(&total_entries_bytes);
+ return erts_atomic_read_nob(&total_entries_bytes);
}
Export *export_get(Export *e)
{
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 1e7bb8514b..ae8dfa4cf8 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -33,27 +33,25 @@ typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ ErtsCodeInfo info; /* MUST be just before beam[] */
+
/*
- * code[0]: Tagged atom for module.
- * code[1]: Tagged atom for function.
- * code[2]: Arity (untagged integer).
- * code[3]: This entry is 0 unless the 'address' field points to it.
+ * beam[0]: This entry is 0 unless the 'addressv' field points to it.
* Threaded code instruction to load function
* (em_call_error_handler), execute BIF (em_apply_bif),
* or a breakpoint instruction (op_i_generic_breakpoint).
- * code[4]: Function pointer to BIF function (for BIFs only),
+ * beam[1]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
* on_load function that has not been run yet, or pointer
- * to code for function code[3] is a breakpont instruction.
+ * to code for the function if beam[0] is a breakpoint instruction.
* Otherwise: 0.
*/
- BeamInstr code[5];
+ BeamInstr beam[2];
} Export;
void init_export_table(void);
-void export_info(int, void *);
+void export_info(fmtfn_t, void *);
ERTS_GLB_INLINE Export* erts_active_export_entry(Eterm m, Eterm f, unsigned a);
Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity);
@@ -68,14 +66,14 @@ Export *export_get(Export*);
void export_start_staging(void);
void export_end_staging(int commit);
-extern erts_smp_mtx_t export_staging_lock;
-#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock)
-#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock)
+extern erts_mtx_t export_staging_lock;
+#define export_staging_lock() erts_mtx_lock(&export_staging_lock)
+#define export_staging_unlock() erts_mtx_unlock(&export_staging_lock)
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
- ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->beam) && \
+ (BeamIsOpCode((EntryPtr)->beam[0], op_apply_bif)))
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index beed847578..9a66e491f3 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -122,8 +122,9 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm
static Export binary_to_term_trap_export;
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1);
-static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
- Export *bif, Eterm arg0, Eterm arg1);
+static Sint transcode_dist_obuf(ErtsDistOutputBuf*, DistEntry*, Uint32 dflags, Sint reds);
+
+
void erts_init_external(void) {
erts_init_trap_export(&term_to_binary_trap_export,
@@ -222,23 +223,10 @@ erts_destroy_atom_cache_map(ErtsAtomCacheMap *acmp)
static ERTS_INLINE void
insert_acache_map(ErtsAtomCacheMap *acmp, Eterm atom, Uint32 dflags)
{
- /*
- * If the receiver do not understand utf8 atoms
- * and this atom cannot be represented in latin1,
- * we are not allowed to cache it.
- *
- * In this case all atoms are assumed to have
- * latin1 encoding in the cache. By refusing it
- * in the cache we will instead encode it using
- * ATOM_UTF8_EXT/SMALL_ATOM_UTF8_EXT which the
- * receiver do not recognize and tear down the
- * connection.
- */
- if (acmp && acmp->sz < ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES
- && ((dflags & DFLAG_UTF8_ATOMS)
- || atom_tab(atom_val(atom))->latin1_chars >= 0)) {
+ if (acmp && acmp->sz < ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES) {
int ix;
ASSERT(acmp->hdr_sz < 0);
+ ASSERT(dflags & DFLAG_UTF8_ATOMS);
ix = atom2cix(atom);
if (acmp->cache[ix].iix < 0) {
acmp->cache[ix].iix = acmp->sz;
@@ -258,9 +246,7 @@ get_iix_acache_map(ErtsAtomCacheMap *acmp, Eterm atom, Uint32 dflags)
ASSERT(is_atom(atom));
ix = atom2cix(atom);
if (acmp->cache[ix].iix < 0) {
- ASSERT(acmp->sz == ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES
- || (!(dflags & DFLAG_UTF8_ATOMS)
- && atom_tab(atom_val(atom))->latin1_chars < 0));
+ ASSERT(acmp->sz == ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES);
return -1;
}
else {
@@ -274,7 +260,6 @@ void
erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags)
{
if (acmp) {
- int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS);
int long_atoms = 0; /* !0 if one or more atoms are longer than 255. */
int i;
int sz;
@@ -285,6 +270,7 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags)
+ 1 /* number of internal cache entries */
;
int min_sz;
+ ASSERT(dflags & DFLAG_UTF8_ATOMS);
ASSERT(acmp->hdr_sz < 0);
/* Make sure cache update instructions fit */
min_sz = fix_sz+(2+4)*acmp->sz;
@@ -296,7 +282,7 @@ erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp, Uint32 dflags)
atom = acmp->cache[acmp->cix[i]].atom;
ASSERT(is_atom(atom));
a = atom_tab(atom_val(atom));
- len = (int) (utf8_atoms ? a->len : a->latin1_chars);
+ len = (int) a->len;
ASSERT(len >= 0);
if (!long_atoms && len > 255)
long_atoms = 1;
@@ -366,18 +352,77 @@ byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp)
}
}
-byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint32 dflags)
+
+#define PASS_THROUGH 'p'
+
+Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf* ob,
+ DistEntry* dep,
+ Uint32 dflags,
+ Sint reds)
{
byte *ip;
byte instr_buf[(2+4)*ERTS_ATOM_CACHE_SIZE];
int ci, sz;
byte dist_hdr_flags;
int long_atoms;
- int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS);
- register byte *ep = ext;
- ASSERT(ep[0] == VERSION_MAGIC);
- if (ep[1] != DIST_HEADER)
- return ext;
+ register byte *ep = ob->extp;
+ ASSERT(dflags & DFLAG_UTF8_ATOMS);
+
+ /*
+ * The buffer can have different layouts at this point depending on
+ * what was known when encoded:
+ *
+ * Pending connection: CtrlTerm [, MsgTerm]
+ * With atom cache : VERSION_MAGIC, DIST_HEADER, ..., CtrlTerm [, MsgTerm]
+ * No atom cache : VERSION_MAGIC, CtrlTerm [, VERSION_MAGIC, MsgTerm]
+ */
+
+ if (ep[0] != VERSION_MAGIC || dep->transcode_ctx) {
+ /*
+ * Was encoded without atom cache toward pending connection.
+ */
+ ASSERT(ep[0] == SMALL_TUPLE_EXT || ep[0] == LARGE_TUPLE_EXT);
+
+ if (~dflags & (DFLAG_DIST_MONITOR | DFLAG_DIST_MONITOR_NAME)
+ && ep[0] == SMALL_TUPLE_EXT
+ && ep[1] == 4
+ && ep[2] == SMALL_INTEGER_EXT
+ && (ep[3] == DOP_MONITOR_P ||
+ ep[3] == DOP_MONITOR_P_EXIT ||
+ ep[3] == DOP_DEMONITOR_P)) {
+ /*
+ * Receiver does not support process monitoring.
+ * Suppress monitor control msg (see erts_dsig_send_monitor)
+ * by converting it to an empty (tick) packet.
+ */
+ ob->ext_endp = ob->extp;
+ return reds;
+ }
+ if (~dflags & (DFLAG_BIT_BINARIES | DFLAG_EXPORT_PTR_TAG
+ | DFLAG_DIST_HDR_ATOM_CACHE)) {
+ reds = transcode_dist_obuf(ob, dep, dflags, reds);
+ if (reds < 0)
+ return reds;
+ ep = ob->extp;
+ }
+ if (dflags & DFLAG_DIST_HDR_ATOM_CACHE) {
+ /*
+ * Encoding was done without atom caching but receiver expects
+ * a dist header, so we prepend an empty one.
+ */
+ *--ep = 0; /* NumberOfAtomCacheRefs */
+ *--ep = DIST_HEADER;
+ *--ep = VERSION_MAGIC;
+ }
+ goto done;
+ }
+ else if (ep[1] != DIST_HEADER) {
+ ASSERT(ep[1] == SMALL_TUPLE_EXT || ep[1] == LARGE_TUPLE_EXT);
+ ASSERT(!(dflags & DFLAG_DIST_HDR_ATOM_CACHE));
+ /* Node without atom cache, 'pass through' needed */
+ *--ep = PASS_THROUGH;
+ goto done;
+ }
dist_hdr_flags = ep[2];
long_atoms = ERTS_DIST_HDR_LONG_ATOMS_FLG & ((int) dist_hdr_flags);
@@ -405,6 +450,7 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint
/ sizeof(Uint32))+1];
register Uint32 flgs;
int iix, flgs_bytes, flgs_buf_ix, used_half_bytes;
+ ErtsAtomCache* cache = dep->cache;
#ifdef DEBUG
int tot_used_half_bytes;
#endif
@@ -447,17 +493,9 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint
Atom *a;
cache->out_arr[cix] = atom;
a = atom_tab(atom_val(atom));
- if (utf8_atoms) {
- sz = a->len;
- ep -= sz;
- sys_memcpy((void *) ep, (void *) a->name, sz);
- }
- else {
- ASSERT(0 <= a->latin1_chars && a->latin1_chars <= MAX_ATOM_CHARACTERS);
- ep -= a->latin1_chars;
- sz = erts_utf8_to_latin1(ep, a->name, a->len);
- ASSERT(a->latin1_chars == sz);
- }
+ sz = a->len;
+ ep -= sz;
+ sys_memcpy((void *) ep, (void *) a->name, sz);
if (long_atoms) {
ep -= 2;
put_int16(sz, ep);
@@ -504,12 +542,16 @@ byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache, Uint
break;
}
}
+ reds -= 3; /*was ERTS_PORT_REDS_DIST_CMD_FINALIZE*/
}
--ep;
put_int8(ci, ep);
*--ep = DIST_HEADER;
*--ep = VERSION_MAGIC;
- return ep;
+done:
+ ob->extp = ep;
+ ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
+ return reds < 0 ? 0 : reds;
}
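
The function above repeatedly writes headers with *--ep in front of an already-encoded payload. A standalone sketch (not OTP code) of that idiom: encode the payload at the end of a buffer with headroom, then write the variable-sized header backwards so the payload never moves.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[16];
        unsigned char *ep = buf + 8;    /* leave headroom for the header */

        memcpy(ep, "hello", 5);         /* payload encoded first */
        *--ep = 5;                      /* prepend: payload length */
        *--ep = 0x83;                   /* prepend: a version-magic-style tag */

        printf("%u %u %.5s\n", ep[0], ep[1], (const char *)(ep + 2));
        return 0;
    }
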
int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp,
@@ -520,7 +562,7 @@ int erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp,
return -1;
} else {
#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ if (!(flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC)))
#endif
sz++ /* VERSION_MAGIC */;
@@ -536,7 +578,7 @@ int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx
return -1;
} else {
#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(ctx->flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ if (!(ctx->flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC)))
#endif
sz++ /* VERSION_MAGIC */;
@@ -568,7 +610,7 @@ int erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap
{
if (!ctx || !ctx->wstack.wstart) {
#ifndef ERTS_DEBUG_USE_DIST_SEP
- if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ if (!(flags & (DFLAG_DIST_HDR_ATOM_CACHE | DFLAG_NO_MAGIC)))
#endif
*(*ext)++ = VERSION_MAGIC;
}
@@ -616,7 +658,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
sys_memcpy((void *) ep, (void *) edep, dist_ext_sz);
ep += dist_ext_sz;
if (new_edep->dep)
- erts_refc_inc(&new_edep->dep->refc, 1);
+ erts_ref_dist_entry(new_edep->dep);
new_edep->extp = ep;
new_edep->ext_endp = ep + ext_sz;
new_edep->heap_size = -1;
@@ -629,57 +671,56 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
byte *ext,
Uint size,
DistEntry *dep,
+ Uint32 conn_id,
ErtsAtomCache *cache)
{
-#undef ERTS_EXT_FAIL
-#undef ERTS_EXT_HDR_FAIL
-#if 1
-#define ERTS_EXT_FAIL goto fail
-#define ERTS_EXT_HDR_FAIL goto bad_hdr
-#else
-#define ERTS_EXT_FAIL abort()
-#define ERTS_EXT_HDR_FAIL abort()
-#endif
-
- register byte *ep = ext;
- int utf8_atoms = (int) (dep->flags & DFLAG_UTF8_ATOMS);
+ register byte *ep;
edep->heap_size = -1;
- edep->ext_endp = ext+size;
+ edep->flags = 0;
+ edep->dep = dep;
+
+ ASSERT(dep);
+ erts_de_rlock(dep);
+
+ ASSERT(dep->flags & DFLAG_UTF8_ATOMS);
+
+ if ((dep->state != ERTS_DE_STATE_CONNECTED &&
+ dep->state != ERTS_DE_STATE_PENDING)
+ || dep->connection_id != conn_id) {
+ erts_de_runlock(dep);
+ return ERTS_PREP_DIST_EXT_CLOSED;
+ }
+
+ if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)) {
+ /* Skip PASS_THROUGH */
+ ext++;
+ size--;
+ }
+ edep->ext_endp = ext + size;
+ ep = ext;
if (size < 2)
- ERTS_EXT_FAIL;
+ goto fail;
if (ep[0] != VERSION_MAGIC) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- if (dep)
- erts_dsprintf(dsbufp,
- "** Got message from incompatible erlang on "
- "channel %d\n",
- dist_entry_channel_no(dep));
- else
- erts_dsprintf(dsbufp,
- "** Attempt to convert old incompatible "
- "binary %d\n",
- *ep);
+ erts_dsprintf(dsbufp,
+ "** Got message from incompatible erlang on "
+ "channel %d\n",
+ dist_entry_channel_no(dep));
erts_send_error_to_logger_nogl(dsbufp);
- ERTS_EXT_FAIL;
+ goto fail;
}
- edep->flags = 0;
- edep->dep = dep;
- if (dep) {
- erts_smp_de_rlock(dep);
- if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
- edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
-
- edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK);
- erts_smp_de_runlock(dep);
- }
+ if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
+
+ edep->connection_id = dep->connection_id;
if (ep[1] != DIST_HEADER) {
if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
edep->attab.size = 0;
edep->extp = ext;
}
@@ -688,17 +729,17 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
int no_atoms;
if (!(edep->flags & ERTS_DIST_EXT_DFLAG_HDR))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
#undef CHKSIZE
#define CHKSIZE(SZ) \
- do { if ((SZ) > edep->ext_endp - ep) ERTS_EXT_HDR_FAIL; } while(0)
+ do { if ((SZ) > edep->ext_endp - ep) goto bad_hdr; } while(0)
CHKSIZE(1+1+1);
ep += 2;
no_atoms = (int) get_int8(ep);
if (no_atoms < 0 || ERTS_ATOM_CACHE_SIZE < no_atoms)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
if (no_atoms) {
int long_atoms = 0;
@@ -776,18 +817,18 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
/* atom already cached */
cix += (int) get_int8(ep);
if (cix >= ERTS_ATOM_CACHE_SIZE)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
atom = cache->in_arr[cix];
if (!is_atom(atom))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
edep->attab.atom[tix] = atom;
}
else {
/* new cached atom */
cix += (int) get_int8(ep);
if (cix >= ERTS_ATOM_CACHE_SIZE)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep++;
if (long_atoms) {
CHKSIZE(2);
@@ -802,12 +843,10 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
CHKSIZE(len);
atom = erts_atom_put((byte *) ep,
len,
- (utf8_atoms
- ? ERTS_ATOM_ENC_UTF8
- : ERTS_ATOM_ENC_LATIN1),
+ ERTS_ATOM_ENC_UTF8,
0);
if (is_non_value(atom))
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
ep += len;
cache->in_arr[cix] = atom;
edep->attab.atom[tix] = atom;
@@ -827,22 +866,21 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
edep->extp = ep;
#ifdef ERTS_DEBUG_USE_DIST_SEP
if (*ep != VERSION_MAGIC)
- ERTS_EXT_HDR_FAIL;
+ goto bad_hdr;
#endif
}
#ifdef ERTS_DEBUG_USE_DIST_SEP
if (*ep != VERSION_MAGIC)
- ERTS_EXT_FAIL;
+ goto fail;
#endif
- return 0;
+ erts_de_runlock(dep);
+
+ return ERTS_PREP_DIST_EXT_SUCCESS;
#undef CHKSIZE
-#undef ERTS_EXT_FAIL
-#undef ERTS_EXT_HDR_FAIL
- bad_hdr:
- if (dep) {
+ bad_hdr: {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"%T got a corrupted distribution header from %T "
@@ -855,10 +893,11 @@ erts_prepare_dist_ext(ErtsDistExternal *edep,
erts_dsprintf(dsbufp, ">>");
erts_send_warning_to_logger_nogl(dsbufp);
}
- fail:
- if (dep)
- erts_kill_dist_connection(dep, dep->connection_id);
- return -1;
+ fail: {
+ erts_de_runlock(dep);
+ erts_kill_dist_connection(dep, conn_id);
+ }
+ return ERTS_PREP_DIST_EXT_FAILED;
}
static void
@@ -889,7 +928,7 @@ bad_dist_ext(ErtsDistExternal *edep)
erts_dsprintf(dsbufp, ", %d=%T", i, edep->attab.atom[i]);
}
erts_send_warning_to_logger_nogl(dsbufp);
- erts_kill_dist_connection(dep, ERTS_DIST_EXT_CON_ID(edep));
+ erts_kill_dist_connection(dep, edep->connection_id);
}
}
@@ -1079,7 +1118,7 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1)
Eterm *tp = tuple_val(BIF_ARG_1);
Eterm Term = tp[1];
Eterm bt = tp[2];
- Binary *bin = ((ProcBin *) binary_val(bt))->val;
+ Binary *bin = erts_magic_ref2bin(bt);
Eterm res = erts_term_to_binary_int(BIF_P, Term, 0, 0,bin);
if (is_tuple(res)) {
ASSERT(BIF_P->flags & F_DISABLE_GC);
@@ -1129,8 +1168,11 @@ BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
case 0:
flags = TERM_TO_BINARY_DFLAGS & ~DFLAG_NEW_FLOATS;
break;
- case 1:
+ case 1: /* Current default... */
flags = TERM_TO_BINARY_DFLAGS;
+ break;
+ case 2:
+ flags = TERM_TO_BINARY_DFLAGS | DFLAG_UTF8_ATOMS;
break;
default:
goto error;
@@ -1211,7 +1253,8 @@ typedef struct B2TContext_t {
ErtsBinary2TermState b2ts;
Uint32 flags;
SWord reds;
- Eterm trap_bin;
+ Uint used_bytes; /* In: boolean, Out: bytes */
+ Eterm trap_bin; /* THE_NON_VALUE if not exported */
Export *bif;
Eterm arg[2];
enum B2TState state;
@@ -1222,6 +1265,7 @@ typedef struct B2TContext_t {
} u;
} B2TContext;
+static B2TContext* b2t_export_context(Process*, B2TContext* src);
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
@@ -1254,7 +1298,7 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
static ERTS_INLINE int
binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
- B2TContext* ctx)
+ B2TContext** ctxp, Process* p)
{
byte *bytes = data;
Sint size = data_size;
@@ -1268,8 +1312,8 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
- if (ctx)
- ctx->state = B2TSizeInit;
+ if (ctxp)
+ (*ctxp)->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
@@ -1286,16 +1330,31 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
return -1;
}
state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
- ctx->reds -= dest_len;
+ if (ctxp)
+ (*ctxp)->reds -= dest_len;
}
state->exttmp = 1;
- if (ctx) {
+ if (ctxp) {
+ /*
+ * Start decompression by exporting trap context
+ * so we don't have to deal with deep-copying z_stream.
+ */
+ B2TContext* ctx = b2t_export_context(p, *ctxp);
+ ASSERT(state == &(*ctxp)->b2ts);
+ state = &ctx->b2ts;
+
if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
return -1;
ctx->u.uc.dbytes = state->extp;
ctx->u.uc.dleft = dest_len;
+ if (ctx->used_bytes) {
+ ASSERT(ctx->used_bytes == 1);
+ /* to be subtracted by stream.avail_in when done */
+ ctx->used_bytes = data_size;
+ }
ctx->state = B2TUncompressChunk;
+ *ctxp = ctx;
}
else {
uLongf dlen = dest_len;
@@ -1339,7 +1398,7 @@ erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size
{
Sint res;
- if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ if (binary2term_prepare(state, data, data_size, NULL, NULL) < 0 ||
(res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
if (state->exttmp)
@@ -1387,21 +1446,24 @@ static void b2t_destroy_context(B2TContext* context)
}
}
-static void b2t_context_destructor(Binary *context_bin)
+static int b2t_context_destructor(Binary *context_bin)
{
B2TContext* ctx = (B2TContext*) ERTS_MAGIC_BIN_DATA(context_bin);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
b2t_destroy_context(ctx);
+ return 1;
}
+static BIF_RETTYPE binary_to_term_int(Process*, Eterm bin, B2TContext*);
+
+
static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1)
{
- Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+ Binary *context_bin = erts_magic_ref2bin(BIF_ARG_1);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor);
- return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL,
- THE_NON_VALUE, THE_NON_VALUE);
+ return binary_to_term_int(BIF_P, THE_NON_VALUE, ERTS_MAGIC_BIN_DATA(context_bin));
}
@@ -1427,17 +1489,18 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src)
b2t_context_destructor);
B2TContext* ctx = ERTS_MAGIC_BIN_DATA(context_b);
Eterm* hp;
+
+ ASSERT(is_non_value(src->trap_bin));
sys_memcpy(ctx, src, sizeof(B2TContext));
if (ctx->state >= B2TDecode && ctx->u.dc.next == &src->u.dc.res) {
ctx->u.dc.next = &ctx->u.dc.res;
}
- hp = HAlloc(p, PROC_BIN_SIZE);
- ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), context_b);
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE);
+ ctx->trap_bin = erts_mk_magic_ref(&hp, &MSO(p), context_b);
return ctx;
}
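
b2t_export_context() above copies an on-stack context into a magic binary so decoding can yield and later resume. A standalone sketch of the same pattern with hypothetical names (struct ctx, export_ctx, sum_steps; not OTP code):

    #include <stdlib.h>
    #include <string.h>

    struct ctx { long i, n, acc; };

    /* Copy a stack-allocated context to the heap so it survives a
     * yield; mirrors the sys_memcpy() in b2t_export_context(). */
    static struct ctx *export_ctx(const struct ctx *src)
    {
        struct ctx *c = malloc(sizeof *c);
        if (c)
            memcpy(c, src, sizeof *c);
        return c;
    }

    /* Run at most `budget` iterations; nonzero return means done. */
    static int sum_steps(struct ctx *c, long budget, long *out)
    {
        while (c->i < c->n && budget-- > 0)
            c->acc += c->i++;
        if (c->i < c->n)
            return 0;               /* out of budget: caller must yield */
        *out = c->acc;
        return 1;
    }

    int main(void)
    {
        struct ctx stack_ctx = { 0, 1000000, 0 }, *heap_ctx = NULL;
        long result = 0;
        if (!sum_steps(&stack_ctx, 1000, &result))
            heap_ctx = export_ctx(&stack_ctx);      /* first yield */
        while (heap_ctx && !sum_steps(heap_ctx, 100000, &result))
            ;                                       /* resume until done */
        free(heap_ctx);
        return 0;
    }
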
-static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b,
- Export *bif_init, Eterm arg0, Eterm arg1)
+static BIF_RETTYPE binary_to_term_int(Process* p, Eterm bin, B2TContext *ctx)
{
BIF_RETTYPE ret_val;
#ifdef EXTREME_B2T_TRAPPING
@@ -1445,25 +1508,17 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
#else
SWord initial_reds = (Uint)(ERTS_BIF_REDS_LEFT(p) * B2T_BYTES_PER_REDUCTION);
#endif
- B2TContext c_buff;
- B2TContext *ctx;
int is_first_call;
- if (context_b == NULL) {
+ if (is_value(bin)) {
/* Setup enough to get started */
is_first_call = 1;
- ctx = &c_buff;
ctx->state = B2TPrepare;
ctx->aligned_alloc = NULL;
- ctx->flags = flags;
- ctx->bif = bif_init;
- ctx->arg[0] = arg0;
- ctx->arg[1] = arg1;
- IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;)
} else {
- is_first_call = 0;
- ctx = ERTS_MAGIC_BIN_DATA(context_b);
+ ASSERT(is_value(ctx->trap_bin));
ASSERT(ctx->state != B2TPrepare);
+ is_first_call = 0;
}
ctx->reds = initial_reds;
@@ -1485,7 +1540,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
if (ctx->aligned_alloc) {
ctx->reds -= bin_size / 8;
}
- if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, &ctx, p) < 0) {
ctx->state = B2TBadArg;
}
break;
@@ -1507,6 +1562,10 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
&& zret == Z_STREAM_END
&& ctx->u.uc.dleft == 0) {
ctx->reds -= chunk;
+ if (ctx->used_bytes) {
+ ASSERT(ctx->used_bytes > 5 + ctx->u.uc.stream.avail_in);
+ ctx->used_bytes -= ctx->u.uc.stream.avail_in;
+ }
ctx->state = B2TSizeInit;
}
else {
@@ -1525,11 +1584,11 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
break;
case B2TDecodeInit:
- if (ctx == &c_buff && ctx->b2ts.extsize > ctx->reds) {
+ if (is_non_value(ctx->trap_bin) && ctx->b2ts.extsize > ctx->reds) {
/* dec_term will maybe trap, allocate space for magic bin
before result term to make it easy to trim with HRelease.
*/
- ctx = b2t_export_context(p, &c_buff);
+ ctx = b2t_export_context(p, ctx);
}
ctx->u.dc.ep = ctx->b2ts.extp;
ctx->u.dc.res = (Eterm) (UWord) NULL;
@@ -1572,6 +1631,25 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
return ret_val;
case B2TDone:
+ if (ctx->used_bytes) {
+ Eterm *hp;
+ Eterm used;
+ if (!ctx->b2ts.exttmp) {
+ ASSERT(ctx->used_bytes == 1);
+ ctx->used_bytes = (ctx->u.dc.ep - ctx->b2ts.extp
+ +1); /* VERSION_MAGIC */
+ }
+ if (IS_USMALL(0, ctx->used_bytes)) {
+ hp = erts_produce_heap(&ctx->u.dc.factory, 3, 0);
+ used = make_small(ctx->used_bytes);
+ }
+ else {
+ hp = erts_produce_heap(&ctx->u.dc.factory, 3+BIG_UINT_HEAP_SIZE, 0);
+ used = uint_to_big(ctx->used_bytes, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ ctx->u.dc.res = TUPLE2(hp, ctx->u.dc.res, used);
+ }
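
The B2TDone branch boxes the byte count as an immediate small integer when it fits and as a heap bignum otherwise. A standalone sketch of such a fit test (the 4-bit tag width here is an assumption for illustration, not the exact BEAM term layout):

    #include <stdint.h>
    #include <stdio.h>

    /* A hypothetical 4-bit immediate tag leaves 60 bits of payload
     * on a 64-bit word, one of which is the sign. */
    #define SMALL_BITS (64 - 4)
    #define FITS_SMALL(u) ((uint64_t)(u) < ((uint64_t)1 << (SMALL_BITS - 1)))

    int main(void)
    {
        printf("%d %d\n", FITS_SMALL(42), FITS_SMALL(UINT64_MAX)); /* 1 0 */
        return 0;
    }
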
b2t_destroy_context(ctx);
if (ctx->u.dc.factory.hp > ctx->u.dc.factory.hp_end) {
@@ -1592,11 +1670,10 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
}
}while (ctx->reds > 0 || ctx->state >= B2TDone);
- if (ctx == &c_buff) {
- ASSERT(ctx->trap_bin == THE_NON_VALUE);
- ctx = b2t_export_context(p, &c_buff);
+ if (is_non_value(ctx->trap_bin)) {
+ ctx = b2t_export_context(p, ctx);
+ ASSERT(is_value(ctx->trap_bin));
}
- ASSERT(ctx->trap_bin != THE_NON_VALUE);
if (is_first_call) {
erts_set_gc_state(p, 0);
@@ -1613,23 +1690,35 @@ HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1)
BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
{
- return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1],
- BIF_ARG_1, THE_NON_VALUE);
+ B2TContext ctx;
+
+ ctx.flags = 0;
+ ctx.used_bytes = 0;
+ ctx.trap_bin = THE_NON_VALUE;
+ ctx.bif = bif_export[BIF_binary_to_term_1];
+ ctx.arg[0] = BIF_ARG_1;
+ ctx.arg[1] = THE_NON_VALUE;
+ return binary_to_term_int(BIF_P, BIF_ARG_1, &ctx);
}
HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2)
BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
{
+ B2TContext ctx;
Eterm opts;
Eterm opt;
- Uint32 flags = 0;
+ ctx.flags = 0;
+ ctx.used_bytes = 0;
opts = BIF_ARG_2;
while (is_list(opts)) {
opt = CAR(list_val(opts));
if (opt == am_safe) {
- flags |= ERTS_DIST_EXT_BTT_SAFE;
+ ctx.flags |= ERTS_DIST_EXT_BTT_SAFE;
+ }
+ else if (opt == am_used) {
+ ctx.used_bytes = 1;
}
else {
goto error;
@@ -1640,8 +1729,11 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
if (is_not_nil(opts))
goto error;
- return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2],
- BIF_ARG_1, BIF_ARG_2);
+ ctx.trap_bin = THE_NON_VALUE;
+ ctx.bif = bif_export[BIF_binary_to_term_2];
+ ctx.arg[0] = BIF_ARG_1;
+ ctx.arg[1] = BIF_ARG_2;
+ return binary_to_term_int(BIF_P, BIF_ARG_1, &ctx);
error:
BIF_ERROR(BIF_P, BADARG);
@@ -1797,7 +1889,7 @@ erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags) {
#endif
#define TERM_TO_BINARY_MEMCPY_FACTOR 8
-static void ttb_context_destructor(Binary *context_bin)
+static int ttb_context_destructor(Binary *context_bin)
{
TTBContext *context = ERTS_MAGIC_BIN_DATA(context_bin);
if (context->alive) {
@@ -1809,7 +1901,7 @@ static void ttb_context_destructor(Binary *context_bin)
case TTBEncode:
DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.ec.result_bin->intern.refc),1));
erts_bin_free(context->s.ec.result_bin);
context->s.ec.result_bin = NULL;
}
@@ -1818,19 +1910,20 @@ static void ttb_context_destructor(Binary *context_bin)
erl_zlib_deflate_finish(&(context->s.cc.stream));
if (context->s.cc.destination_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.destination_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.destination_bin->intern.refc),1));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
}
if (context->s.cc.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.result_bin->intern.refc),1));
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
}
break;
}
}
+ return 1;
}
static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint flags,
@@ -1854,14 +1947,15 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context_b = erts_create_magic_binary(sizeof(TTBContext), \
ttb_context_destructor); \
context = ERTS_MAGIC_BIN_DATA(context_b); \
- memcpy(context,&c_buff,sizeof(TTBContext)); \
+ sys_memcpy(context,&c_buff,sizeof(TTBContext)); \
} \
} while (0)
#define RETURN_STATE() \
do { \
- hp = HAlloc(p, PROC_BIN_SIZE+3); \
- c_term = erts_mk_magic_binary_term(&hp, &MSO(p), context_b); \
+ static const int TUPLE2_SIZE = 2 + 1; \
+ hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE + TUPLE2_SIZE); \
+ c_term = erts_mk_magic_ref(&hp, &MSO(p), context_b); \
res = TUPLE2(hp, Term, c_term); \
BUMP_ALL_REDS(p); \
return res; \
@@ -1907,8 +2001,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
result_bin = erts_bin_nrml_alloc(size);
- erts_refc_init(&result_bin->refc, 0);
- result_bin->orig_bytes[0] = VERSION_MAGIC;
+ result_bin->orig_bytes[0] = (byte)VERSION_MAGIC;
/* Next state immediately, no need to export context */
context->state = TTBEncode;
context->s.ec.flags = flags;
@@ -1934,24 +2027,14 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
level = context->s.ec.level;
BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
if (level == 0 || real_size < 6) { /* We are done */
- ProcBin* pb;
return_normal:
context->s.ec.result_bin = NULL;
context->alive = 0;
- pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = real_size;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = result_bin;
- pb->bytes = (byte*) result_bin->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
- return make_binary(pb);
+ return erts_build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE),
+ result_bin);
}
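The open-coded ProcBin initialization deleted above now lives behind erts_build_proc_bin(). A plausible reconstruction of that helper, inferred purely from the removed field assignments; the actual ERTS definition may differ:

    /* Reconstructed from the deleted boilerplate above; not the real
     * ERTS source. Links the ProcBin into the off-heap list, charges
     * the binary's size to the off-heap overhead, and returns a tagged
     * binary term. Assumes the Binary's refcount is already 1. */
    Eterm erts_build_proc_bin(ErlOffHeap *ohp, Eterm *hp, Binary *bin)
    {
        ProcBin *pb = (ProcBin *) hp;
        pb->thing_word = HEADER_PROC_BIN;
        pb->size = bin->orig_size;
        pb->next = ohp->first;
        ohp->first = (struct erl_off_heap_header *) pb;
        pb->val = bin;
        pb->bytes = (byte *) bin->orig_bytes;
        pb->flags = 0;
        OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
        return make_binary(pb);
    }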
/* Continue with compression... */
/* To make absolutely sure that zlib does not barf on a reallocated context,
@@ -1967,8 +2050,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->s.cc.result_bin = result_bin;
result_bin = erts_bin_nrml_alloc(real_size);
- erts_refc_init(&result_bin->refc, 0);
- result_bin->orig_bytes[0] = VERSION_MAGIC;
+ result_bin->orig_bytes[0] = (byte) VERSION_MAGIC;
context->s.cc.destination_bin = result_bin;
context->s.cc.dest_len = 0;
@@ -2009,24 +2091,17 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
result_bin = erts_bin_realloc(context->s.cc.destination_bin,
context->s.cc.dest_len+6);
context->s.cc.destination_bin = NULL;
- pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = context->s.cc.dest_len+6;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = result_bin;
- pb->bytes = (byte*) result_bin->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
- return make_binary(pb);
+ return erts_build_proc_bin(&MSO(p),
+ HAlloc(p, PROC_BIN_SIZE),
+ result_bin);
}
default: /* Compression error, revert to uncompressed binary (still in
context) */
@@ -2042,13 +2117,13 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
erl_zlib_deflate_finish(&(context->s.cc.stream));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2080,7 +2155,7 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
{
int iix;
int len;
- int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS);
+ const int utf8_atoms = (int) (dflags & DFLAG_UTF8_ATOMS);
ASSERT(is_atom(atom));
@@ -2163,12 +2238,8 @@ enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
* We use this atom as sysname in local pid/port/refs
* for the ETS compressed format (DFLAG_INTERNAL_TAGS).
*
- * We used atom '' earlier but that turned out to cause problems
- * for buggy erl_interface/ic usage of c-nodes with empty node names.
- * A long atom reduces risk of nodes actually called this and the length
- * does not matter anyway as it's encoded with atom index (ATOM_INTERNAL_REF2).
*/
-#define INTERNAL_LOCAL_SYSNAME am_await_microstate_accounting_modifications
+#define INTERNAL_LOCAL_SYSNAME am_ErtsSecretAtom
static byte*
enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
@@ -2479,8 +2550,6 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
break;
}
- L_jump_start:
-
if (ctx && --r <= 0) {
*reds = 0;
ctx->obj = obj;
@@ -2488,6 +2557,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
WSTACK_SAVE(s, &ctx->wstack);
return -1;
}
+
+ L_jump_start:
switch(tag_val_def(obj)) {
case NIL_DEF:
*ep++ = NIL_EXT;
@@ -2572,7 +2643,9 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ erts_magic_ref_save_bin(obj);
+
+ i = ref_no_numbers(obj);
put_int16(i, ep);
ep += 2;
ep = enc_atom(acmp, sysname, ep, dflags);
@@ -2765,7 +2838,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
erts_emasculate_writable_binary(pb);
bytes += (pb->val->orig_bytes - before_realloc);
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
sys_memcpy(&tmp, pb, sizeof(ProcBin));
tmp.next = *off_heap;
@@ -2795,6 +2868,8 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep[j] = 0; /* Zero unused bits at end of binary */
data_dst = ep;
ep += j + 1;
+ if (ctx)
+ ctx->hopefull_flags |= DFLAG_BIT_BINARIES;
} else {
/*
* Bit-level binary, but the receiver doesn't support it.
@@ -2826,9 +2901,12 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
Export* exp = *((Export **) (export_val(obj) + 1));
if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
*ep++ = EXPORT_EXT;
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
- ep = enc_term(acmp, make_small(exp->code[2]), ep, dflags, off_heap);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
+ ep = enc_term(acmp, make_small(exp->info.mfa.arity),
+ ep, dflags, off_heap);
+ if (ctx)
+ ctx->hopefull_flags |= DFLAG_EXPORT_PTR_TAG;
} else {
/* Tag, arity */
*ep++ = SMALL_TUPLE_EXT;
@@ -2836,10 +2914,10 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ep += 1;
/* Module name */
- ep = enc_atom(acmp, exp->code[0], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.module, ep, dflags);
/* Function name */
- ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ ep = enc_atom(acmp, exp->info.mfa.function, ep, dflags);
}
break;
}
@@ -2849,62 +2927,27 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
int ei;
- if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
- *ep++ = NEW_FUN_EXT;
- WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE,
- (UWord) ep); /* Position for patching in size */
- ep += 4;
- *ep = funp->arity;
- ep += 1;
- sys_memcpy(ep, funp->fe->uniq, 16);
- ep += 16;
- put_int32(funp->fe->index, ep);
- ep += 4;
- put_int32(funp->num_free, ep);
- ep += 4;
- ep = enc_atom(acmp, funp->fe->module, ep, dflags);
- ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap);
- ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap);
- ep = enc_pid(acmp, funp->creator, ep, dflags);
- } else {
- /*
- * Communicating with an obsolete erl_interface or
- * jinterface node. Convert the fun to a tuple to
- * avoid crasching.
- */
-
- /* Tag, arity */
- *ep++ = SMALL_TUPLE_EXT;
- put_int8(5, ep);
- ep += 1;
-
- /* 'fun' */
- ep = enc_atom(acmp, am_fun, ep, dflags);
-
- /* Module name */
- ep = enc_atom(acmp, funp->fe->module, ep, dflags);
-
- /* Index, Uniq */
- *ep++ = INTEGER_EXT;
- put_int32(funp->fe->old_index, ep);
- ep += 4;
- *ep++ = INTEGER_EXT;
- put_int32(funp->fe->old_uniq, ep);
- ep += 4;
-
- /* Environment sub-tuple arity */
- ASSERT(funp->num_free < MAX_ARG);
- *ep++ = SMALL_TUPLE_EXT;
- put_int8(funp->num_free, ep);
- ep += 1;
- }
- for (ei = funp->num_free-1; ei > 0; ei--) {
+ ASSERT(dflags & DFLAG_NEW_FUN_TAGS);
+ *ep++ = NEW_FUN_EXT;
+ WSTACK_PUSH2(s, ENC_PATCH_FUN_SIZE,
+ (UWord) ep); /* Position for patching in size */
+ ep += 4;
+ *ep = funp->arity;
+ ep += 1;
+ sys_memcpy(ep, funp->fe->uniq, 16);
+ ep += 16;
+ put_int32(funp->fe->index, ep);
+ ep += 4;
+ put_int32(funp->num_free, ep);
+ ep += 4;
+ ep = enc_atom(acmp, funp->fe->module, ep, dflags);
+ ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags, off_heap);
+ ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags, off_heap);
+ ep = enc_pid(acmp, funp->creator, ep, dflags);
+
+ for (ei = funp->num_free-1; ei >= 0; ei--) {
WSTACK_PUSH2(s, ENC_TERM, (UWord) funp->env[ei]);
}
- if (funp->num_free != 0) {
- obj = funp->env[0];
- goto L_jump_start;
- }
}
break;
}
@@ -3097,7 +3140,7 @@ dec_term(ErtsDistExternal *edep,
#if defined(ARCH_64)
*objp = make_small(sn);
#else
- if (MY_IS_SSMALL(sn)) {
+ if (IS_SSMALL(sn)) {
*objp = make_small(sn);
} else {
*objp = small_to_big(sn, hp);
@@ -3242,6 +3285,7 @@ dec_term_atom_common:
n--;
if (ctx) {
if (reds < n) {
+ ASSERT(reds > 0);
ctx->state = B2TDecodeList;
ctx->u.dc.remaining_n = n - reds;
n = reds;
@@ -3427,26 +3471,35 @@ dec_term_atom_common:
r0 = get_int32(ep); /* allow full word */
ep += 4;
- ref_ext_common:
+ ref_ext_common: {
+ ErtsORefThing *rtp;
+
if (ref_words > ERTS_MAX_REF_NUMBERS)
goto error;
node = dec_get_node(sysname, cre);
if(node == erts_this_node) {
- RefThing *rtp = (RefThing *) hp;
- ref_num = (Uint32 *) (hp + REF_THING_HEAD_SIZE);
+
+ rtp = (ErtsORefThing *) hp;
+ ref_num = &rtp->num[0];
+ if (ref_words != ERTS_REF_NUMBERS) {
+ int i;
+ if (ref_words > ERTS_REF_NUMBERS)
+ goto error; /* Not a ref that we created... */
+ for (i = ref_words; i < ERTS_REF_NUMBERS; i++)
+ ref_num[i] = 0;
+ }
-#if defined(ARCH_64)
- hp += REF_THING_HEAD_SIZE + ref_words/2 + 1;
- rtp->header = make_ref_thing_header(ref_words/2 + 1);
-#else
- hp += REF_THING_HEAD_SIZE + ref_words;
- rtp->header = make_ref_thing_header(ref_words);
+#ifdef ERTS_ORDINARY_REF_MARKER
+ rtp->marker = ERTS_ORDINARY_REF_MARKER;
#endif
+ hp += ERTS_REF_THING_SIZE;
+ rtp->header = ERTS_REF_THING_HEADER;
*objp = make_internal_ref(rtp);
}
else {
ExternalThing *etp = (ExternalThing *) hp;
+ rtp = NULL;
#if defined(ARCH_64)
hp += EXTERNAL_THING_HEAD_SIZE + ref_words/2 + 1;
#else
@@ -3464,12 +3517,13 @@ dec_term_atom_common:
factory->off_heap->first = (struct erl_off_heap_header*)etp;
*objp = make_external_ref(etp);
ref_num = &(etp->data.ui32[0]);
- }
-
#if defined(ARCH_64)
- *(ref_num++) = ref_words /* 32-bit arity */;
+ *(ref_num++) = ref_words /* 32-bit arity */;
#endif
+ }
+
ref_num[0] = r0;
+
for(i = 1; i < ref_words; i++) {
ref_num[i] = get_int32(ep);
ep += 4;
@@ -3478,8 +3532,26 @@ dec_term_atom_common:
if ((1 + ref_words) % 2)
ref_num[ref_words] = 0;
#endif
+ if (node == erts_this_node) {
+ /* Check if it was a magic reference... */
+ ErtsMagicBinary *mb = erts_magic_ref_lookup_bin(ref_num);
+ if (mb) {
+ /*
+ * Was a magic ref; adjust it...
+ *
+ * Refc on binary was increased by lookup above...
+ */
+ ASSERT(rtp);
+ hp = (Eterm *) rtp;
+ write_magic_ref_thing(hp, factory->off_heap, mb);
+ OH_OVERHEAD(factory->off_heap,
+ mb->orig_size / sizeof(Eterm));
+ hp += ERTS_MAGIC_REF_THING_SIZE;
+ }
+ }
break;
}
+ }
case BINARY_EXT:
{
n = get_int32(ep);
@@ -3495,19 +3567,9 @@ dec_term_atom_common:
*objp = make_binary(hb);
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
- ProcBin* pb;
- erts_refc_init(&dbin->refc, 1);
- pb = (ProcBin *) hp;
+
+ *objp = erts_build_proc_bin(factory->off_heap, hp, dbin);
hp += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = n;
- pb->next = factory->off_heap->first;
- factory->off_heap->first = (struct erl_off_heap_header*)pb;
- OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
- pb->val = dbin;
- pb->bytes = (byte*) dbin->orig_bytes;
- pb->flags = 0;
- *objp = make_binary(pb);
if (ctx) {
int n_limit = reds * B2T_MEMCPY_FACTOR;
if (n > n_limit) {
@@ -3547,19 +3609,9 @@ dec_term_atom_common:
ep += n;
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
- ProcBin* pb;
+ Uint n_copy = n;
- erts_refc_init(&dbin->refc, 1);
- pb = (ProcBin *) hp;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = n;
- pb->next = factory->off_heap->first;
- factory->off_heap->first = (struct erl_off_heap_header*)pb;
- OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
- pb->val = dbin;
- pb->bytes = (byte*) dbin->orig_bytes;
- pb->flags = 0;
- bin = make_binary(pb);
+ bin = erts_build_proc_bin(factory->off_heap, hp, dbin);
hp += PROC_BIN_SIZE;
if (ctx) {
int n_limit = reds * B2T_MEMCPY_FACTOR;
@@ -3567,15 +3619,15 @@ dec_term_atom_common:
ctx->state = B2TDecodeBinary;
ctx->u.dc.remaining_n = n - n_limit;
ctx->u.dc.remaining_bytes = dbin->orig_bytes + n_limit;
- n = n_limit;
+ n_copy = n_limit;
reds = 0;
}
- else
+ else {
reds -= n / B2T_MEMCPY_FACTOR;
+ }
}
- sys_memcpy(dbin->orig_bytes, ep, n);
- ep += n;
- n = pb->size;
+ sys_memcpy(dbin->orig_bytes, ep, n_copy);
+ ep += n_copy;
}
if (bitsize == 8 || n == 0) {
@@ -3747,9 +3799,8 @@ dec_term_atom_common:
funp->arity = arity;
#ifdef HIPE
if (funp->fe->native_address == NULL) {
- hipe_set_closure_stub(funp->fe, num_free);
+ hipe_set_closure_stub(funp->fe);
}
- funp->native_address = funp->fe->native_address;
#endif
hp = factory->hp;
@@ -3821,9 +3872,6 @@ dec_term_atom_common:
funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
funp->arity = funp->fe->address[-1] - num_free;
-#ifdef HIPE
- funp->native_address = funp->fe->native_address;
-#endif
hp = factory->hp;
/* Environment */
@@ -3856,7 +3904,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
@@ -3874,7 +3922,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
@@ -3960,6 +4008,7 @@ dec_term_atom_common:
if (ctx) {
ctx->state = B2TDone;
ctx->reds = reds;
+ ctx->u.dc.ep = ep;
}
return ep;
@@ -3974,7 +4023,7 @@ error:
factory->hp = hp; /* the largest must be the freshest */
}
}
- else ASSERT(factory->hp == hp);
+ else ASSERT(!factory->hp || factory->hp == hp);
error_hamt:
erts_factory_undo(factory);
@@ -4105,7 +4154,7 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
/*fall through*/
case REF_DEF:
ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
- i = ref_no_of_numbers(obj);
+ i = ref_no_numbers(obj);
result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
1 + 4*i);
break;
@@ -4229,24 +4278,12 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
{
ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
- if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
- result += 20+1+1+4; /* New ID + Tag */
- result += 4; /* Length field (number of free variables */
- result += encode_size_struct2(acmp, funp->creator, dflags);
- result += encode_size_struct2(acmp, funp->fe->module, dflags);
- result += 2 * (1+4); /* Index, Uniq */
- } else {
- /*
- * Size when fun is mapped to a tuple.
- */
- result += 1 + 1; /* Tuple tag, arity */
- result += 1 + 1 + 2 +
- atom_tab(atom_val(am_fun))->len; /* 'fun' */
- result += 1 + 1 + 2 +
- atom_tab(atom_val(funp->fe->module))->len; /* Module name */
- result += 2 * (1 + 4); /* Index + Uniq */
- result += 1 + (funp->num_free < 0x100 ? 1 : 4);
- }
+ ASSERT(dflags & DFLAG_NEW_FUN_TAGS);
+ result += 20+1+1+4; /* New ID + Tag */
+            result += 4; /* Length field (number of free variables) */
+ result += encode_size_struct2(acmp, funp->creator, dflags);
+ result += encode_size_struct2(acmp, funp->fe->module, dflags);
+ result += 2 * (1+4); /* Index, Uniq */
if (funp->num_free > 1) {
WSTACK_PUSH2(s, (UWord) (funp->env + 1),
(UWord) TERM_ARRAY_OP(funp->num_free-1));
@@ -4262,9 +4299,9 @@ encode_size_struct_int(TTBSizeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj,
{
Export* ep = *((Export **) (export_val(obj) + 1));
result += 1;
- result += encode_size_struct2(acmp, ep->code[0], dflags);
- result += encode_size_struct2(acmp, ep->code[1], dflags);
- result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.module, dflags);
+ result += encode_size_struct2(acmp, ep->info.mfa.function, dflags);
+ result += encode_size_struct2(acmp, make_small(ep->info.mfa.arity), dflags);
}
break;
@@ -4334,7 +4371,7 @@ decoded_size(byte *ep, byte* endp, int internal_tags, B2TContext* ctx)
}
}
else
- reds = 0; /* not used but compiler warns anyway */
+ ERTS_UNDEF(reds, 0);
heap_size = 0;
terms = 1;
@@ -4644,3 +4681,182 @@ error:
#undef SKIP2
#undef CHKSIZE
}
+
+
+struct transcode_context {
+ enum {
+ TRANSCODE_DEC_MSG_SIZE,
+ TRANSCODE_DEC_MSG,
+ TRANSCODE_ENC_CTL,
+ TRANSCODE_ENC_MSG
+ }state;
+ Eterm ctl_term;
+ Eterm* ctl_heap;
+ ErtsHeapFactory ctl_factory;
+ Eterm* msg_heap;
+ B2TContext b2t;
+ TTBEncodeContext ttb;
+#ifdef DEBUG
+ ErtsDistOutputBuf* dbg_ob;
+#endif
+};
+
+void transcode_free_ctx(DistEntry* dep)
+{
+ struct transcode_context* ctx = dep->transcode_ctx;
+
+ erts_factory_close(&ctx->ctl_factory);
+ erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx->ctl_heap);
+
+ if (ctx->msg_heap) {
+ erts_factory_close(&ctx->b2t.u.dc.factory);
+ erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx->msg_heap);
+ }
+ erts_free(ERTS_ALC_T_DIST_TRANSCODE, ctx);
+ dep->transcode_ctx = NULL;
+}
+
+Sint transcode_dist_obuf(ErtsDistOutputBuf* ob,
+ DistEntry* dep,
+ Uint32 dflags,
+ Sint reds)
+{
+ Sint hsz;
+ byte* decp;
+ const int have_msg = !!ob->msg_start;
+ int i;
+ struct transcode_context* ctx = dep->transcode_ctx;
+
+ if (!ctx) { /* first call for 'ob' */
+ ASSERT(!(ob->hopefull_flags & ~(Uint)(DFLAG_BIT_BINARIES |
+ DFLAG_EXPORT_PTR_TAG)));
+ if (~dflags & ob->hopefull_flags) {
+            /*
+             * Receiver does not support bitstrings and/or export funs,
+             * and the output buffer contains such message tags (hopefull_flags).
+             * Must transcode control and message terms to use tuple fallbacks.
+             */
+ ctx = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, sizeof(struct transcode_context));
+ dep->transcode_ctx = ctx;
+ #ifdef DEBUG
+ ctx->dbg_ob = ob;
+ #endif
+
+ hsz = decoded_size(ob->extp, ob->ext_endp, 0, NULL);
+ ctx->ctl_heap = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, hsz*sizeof(Eterm));
+ erts_factory_tmp_init(&ctx->ctl_factory, ctx->ctl_heap, hsz, ERTS_ALC_T_DIST_TRANSCODE);
+ ctx->msg_heap = NULL;
+
+ decp = dec_term(NULL, &ctx->ctl_factory, ob->extp, &ctx->ctl_term, NULL);
+ if (have_msg) {
+ ASSERT(decp == ob->msg_start); (void)decp;
+ ctx->b2t.u.sc.ep = NULL;
+ ctx->b2t.state = B2TSize;
+ ctx->b2t.aligned_alloc = NULL;
+ ctx->b2t.b2ts.exttmp = 0;
+ ctx->state = TRANSCODE_DEC_MSG_SIZE;
+ }
+ else {
+ ASSERT(decp == ob->ext_endp);
+ ctx->state = TRANSCODE_ENC_CTL;
+ }
+ }
+ else if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)) {
+ /*
+ * No need for full transcoding, but primitive receiver (erl_/jinterface)
+ * expects VERSION_MAGIC before both control and message terms.
+ */
+ if (ob->msg_start) {
+ Sint ctl_bytes = ob->msg_start - ob->extp;
+ ASSERT(ob->extp < ob->msg_start && ob->msg_start < ob->ext_endp);
+ /* Move control term back 1 byte to make room */
+ sys_memmove(ob->extp-1, ob->extp, ctl_bytes);
+ *--(ob->msg_start) = VERSION_MAGIC;
+ --(ob->extp);
+ reds -= ctl_bytes / (B2T_BYTES_PER_REDUCTION * B2T_MEMCPY_FACTOR);
+ }
+ *--(ob->extp) = VERSION_MAGIC;
+ goto done;
+ }
+ else
+ goto done;
+ }
+ else { /* continue after yield */
+ ASSERT(ctx->dbg_ob == ob);
+ }
+ ctx->b2t.reds = reds * B2T_BYTES_PER_REDUCTION;
+
+ switch (ctx->state) {
+ case TRANSCODE_DEC_MSG_SIZE:
+ hsz = decoded_size(ob->msg_start, ob->ext_endp, 0, &ctx->b2t);
+ if (ctx->b2t.state == B2TSize) {
+ return -1;
+ }
+ ASSERT(ctx->b2t.state == B2TDecodeInit);
+ ctx->msg_heap = erts_alloc(ERTS_ALC_T_DIST_TRANSCODE, hsz*sizeof(Eterm));
+ ctx->b2t.u.dc.ep = ob->msg_start;
+ ctx->b2t.u.dc.res = (Eterm) NULL;
+ ctx->b2t.u.dc.next = &ctx->b2t.u.dc.res;
+ erts_factory_tmp_init(&ctx->b2t.u.dc.factory,
+ ctx->msg_heap, hsz, ERTS_ALC_T_DIST_TRANSCODE);
+ ctx->b2t.u.dc.flat_maps.wstart = NULL;
+ ctx->b2t.u.dc.hamt_array.pstart = NULL;
+ ctx->b2t.state = B2TDecode;
+
+ ctx->state = TRANSCODE_DEC_MSG;
+ case TRANSCODE_DEC_MSG:
+ if (ctx->b2t.reds <= 0)
+ ctx->b2t.reds = 1;
+ decp = dec_term(NULL, NULL, NULL, NULL, &ctx->b2t);
+ if (ctx->b2t.state < B2TDone) {
+ return -1;
+ }
+ ASSERT(ctx->b2t.state == B2TDone);
+ ASSERT(decp && decp <= ob->ext_endp);
+ reds = ctx->b2t.reds / B2T_BYTES_PER_REDUCTION;
+ b2t_destroy_context(&ctx->b2t);
+
+ ctx->state = TRANSCODE_ENC_CTL;
+ case TRANSCODE_ENC_CTL:
+ if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE)) {
+ ASSERT(!(dflags & DFLAG_NO_MAGIC));
+ ob->extp -= 2; /* VERSION_MAGIC x 2 */
+ }
+ ob->ext_endp = ob->extp;
+ i = erts_encode_dist_ext(ctx->ctl_term, &ob->ext_endp, dflags,
+ NULL, NULL, NULL);
+ ASSERT(i == 0); (void)i;
+ ASSERT(ob->ext_endp <= ob->alloc_endp);
+
+ if (!have_msg) {
+ break;
+ }
+ ob->msg_start = ob->ext_endp;
+ ctx->ttb.wstack.wstart = NULL;
+ ctx->ttb.flags = dflags;
+ ctx->ttb.hopefull_flags = 0;
+ ctx->ttb.level = 0;
+
+ ctx->state = TRANSCODE_ENC_MSG;
+ case TRANSCODE_ENC_MSG:
+ reds *= TERM_TO_BINARY_LOOP_FACTOR;
+ if (erts_encode_dist_ext(ctx->b2t.u.dc.res, &ob->ext_endp, dflags, NULL,
+ &ctx->ttb, &reds)) {
+ return -1;
+ }
+ reds /= TERM_TO_BINARY_LOOP_FACTOR;
+
+ ASSERT(ob->ext_endp <= ob->alloc_endp);
+ ASSERT(!ctx->ttb.hopefull_flags);
+ }
+ transcode_free_ctx(dep);
+
+done:
+ if (!(dflags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--(ob->extp) = PASS_THROUGH;
+
+ if (reds < 0)
+ reds = 0;
+
+ return reds;
+}
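transcode_dist_obuf() is driven by a simple capability test: any bit set in the buffer's hopefull_flags but clear in the receiver's negotiated dflags forces a transcode. A standalone sketch of that check (the bit positions are placeholders, not the real DFLAG_* values):

    #include <stdio.h>

    typedef unsigned int Uint32;

    #define DFLAG_BIT_BINARIES   (1u << 0)   /* placeholder bit positions */
    #define DFLAG_EXPORT_PTR_TAG (1u << 1)

    /* Mirrors "~dflags & ob->hopefull_flags" above: nonzero means the
     * buffer was encoded optimistically with a tag the peer can't read. */
    static int needs_transcode(Uint32 dflags, Uint32 hopefull_flags)
    {
        return (~dflags & hopefull_flags) != 0;
    }

    int main(void)
    {
        /* Peer supports export funs only; buffer used bit-binary tags. */
        printf("%d\n", needs_transcode(DFLAG_EXPORT_PTR_TAG,
                                       DFLAG_BIT_BINARIES));   /* -> 1 */
        return 0;
    }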
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index f00426cc16..edac177cc6 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -83,7 +83,10 @@
#ifndef ERL_EXTERNAL_H__
#define ERL_EXTERNAL_H__
+#define ERL_NODE_TABLES_BASIC_ONLY
#include "erl_node_tables.h"
+#undef ERL_NODE_TABLES_BASIC_ONLY
+#include "erl_alloc.h"
#define ERTS_ATOM_CACHE_SIZE 2048
@@ -109,35 +112,26 @@ typedef struct {
} ErtsAtomTranslationTable;
/*
- * These flags are tagged onto the high bits of a connection ID and stored in
- * the ErtsDistExternal structure's flags field. They are used to indicate
- * various bits of state necessary to decode binaries in a variety of
- * scenarios. The mask ERTS_DIST_EXT_CON_ID_MASK is used later to separate the
- * connection ID from the flags. Be careful to ensure that the mask does not
- * overlap any of the bits used for flags, or ERTS will leak flags bits into
- * connection IDs and leak connection ID bits into the flags.
+ * These flags are stored in the ErtsDistExternal structure's flags field.
+ * They are used to indicate various bits of state necessary to decode binaries
+ * in a variety of scenarios.
*/
-#define ERTS_DIST_EXT_DFLAG_HDR ((Uint32) 0x80000000)
-#define ERTS_DIST_EXT_ATOM_TRANS_TAB ((Uint32) 0x40000000)
-#define ERTS_DIST_EXT_BTT_SAFE ((Uint32) 0x20000000)
-#define ERTS_DIST_EXT_CON_ID_MASK ((Uint32) 0x1fffffff)
+#define ERTS_DIST_EXT_DFLAG_HDR ((Uint32) 0x1)
+#define ERTS_DIST_EXT_ATOM_TRANS_TAB ((Uint32) 0x2)
+#define ERTS_DIST_EXT_BTT_SAFE ((Uint32) 0x4)
+
+#define ERTS_DIST_CON_ID_MASK ((Uint32) 0x00ffffff) /* also in net_kernel.erl */
-#define ERTS_DIST_EXT_CON_ID(DIST_EXTP) \
- ((DIST_EXTP)->flags & ERTS_DIST_EXT_CON_ID_MASK)
typedef struct {
DistEntry *dep;
byte *extp;
byte *ext_endp;
Sint heap_size;
+ Uint32 connection_id;
Uint32 flags;
ErtsAtomTranslationTable attab;
} ErtsDistExternal;
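Under the old layout the flag bits and the connection ID shared one 32-bit word, so every consumer had to mask carefully; the new struct carries connection_id and flags in separate fields. A small before/after illustration, using the mask values from the removed defines above:

    typedef unsigned int Uint32;

    /* Old packing (values from the removed defines above). */
    #define OLD_DIST_EXT_BTT_SAFE    ((Uint32) 0x20000000)
    #define OLD_DIST_EXT_CON_ID_MASK ((Uint32) 0x1fffffff)

    static Uint32 old_con_id(Uint32 packed)
    {
        /* Forgetting this mask leaked flag bits into the connection ID. */
        return packed & OLD_DIST_EXT_CON_ID_MASK;
    }

    /* New layout sketch: no masking needed, the fields can't collide. */
    struct dist_ext_sketch {
        Uint32 connection_id;   /* bounded by ERTS_DIST_CON_ID_MASK */
        Uint32 flags;           /* ERTS_DIST_EXT_* bits only */
    };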
-typedef struct {
- int have_header;
- int cache_entries;
-} ErtsDistHeaderPeek;
-
#define ERTS_DIST_EXT_SIZE(EDEP) \
(sizeof(ErtsDistExternal) \
- (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \
@@ -163,7 +157,7 @@ void erts_finalize_atom_cache_map(ErtsAtomCacheMap *, Uint32);
Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
-byte *erts_encode_ext_dist_header_finalize(byte *, ErtsAtomCache *, Uint32);
+Sint erts_encode_ext_dist_header_finalize(ErtsDistOutputBuf*, DistEntry *, Uint32 dflags, Sint reds);
struct erts_dsig_send_context;
int erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap*, Uint* szp);
int erts_encode_dist_ext_size_int(Eterm term, struct erts_dsig_send_context* ctx, Uint* szp);
@@ -177,16 +171,18 @@ Uint erts_encode_ext_size_ets(Eterm);
void erts_encode_ext(Eterm, byte **);
byte* erts_encode_ext_ets(Eterm, byte *, struct erl_off_heap_header** ext_off_heap);
-#ifdef ERTS_WANT_EXTERNAL_TAGS
-ERTS_GLB_INLINE void erts_peek_dist_header(ErtsDistHeaderPeek *, byte *, Uint);
-#endif
ERTS_GLB_INLINE void erts_free_dist_ext_copy(ErtsDistExternal *);
ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *);
ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint);
void *erts_dist_ext_trailer(ErtsDistExternal *);
void erts_destroy_dist_ext_copy(ErtsDistExternal *);
+
+#define ERTS_PREP_DIST_EXT_FAILED (-1)
+#define ERTS_PREP_DIST_EXT_SUCCESS (0)
+#define ERTS_PREP_DIST_EXT_CLOSED (1)
+
int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
- DistEntry *, ErtsAtomCache *);
+ DistEntry *, Uint32 conn_id, ErtsAtomCache *);
Sint erts_decode_dist_ext_size(ErtsDistExternal *);
Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *);
@@ -202,23 +198,9 @@ void erts_binary2term_abort(ErtsBinary2TermState *);
Eterm erts_binary2term_create(ErtsBinary2TermState *, ErtsHeapFactory*);
int erts_debug_max_atom_out_cache_index(void);
int erts_debug_atom_to_out_cache_index(Eterm);
-
+void transcode_free_ctx(DistEntry* dep);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ERTS_WANT_EXTERNAL_TAGS
-ERTS_GLB_INLINE void
-erts_peek_dist_header(ErtsDistHeaderPeek *dhpp, byte *ext, Uint sz)
-{
- if (ext[0] == VERSION_MAGIC
- || ext[1] != DIST_HEADER
- || sz < (1+1+1))
- dhpp->have_header = 0;
- else {
- dhpp->have_header = 1;
- dhpp->cache_entries = (int) get_int8(&ext[2]);
- }
-}
-#endif
ERTS_GLB_INLINE void
erts_free_dist_ext_copy(ErtsDistExternal *edep)
diff --git a/erts/emulator/beam/float_instrs.tab b/erts/emulator/beam/float_instrs.tab
new file mode 100644
index 0000000000..3d4db77892
--- /dev/null
+++ b/erts/emulator/beam/float_instrs.tab
@@ -0,0 +1,88 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+LOAD_DOUBLE(Src, Dst) {
+ GET_DOUBLE($Src, *(FloatDef *) &$Dst);
+}
+
+fload(Reg, Dst) {
+ $LOAD_DOUBLE($Reg, $Dst);
+}
+
+fstore(Float, Dst) {
+ PUT_DOUBLE(*((FloatDef *) &$Float), HTOP);
+ $Dst = make_float(HTOP);
+ HTOP += FLOAT_SIZE_OBJECT;
+}
+
+fconv(Src, Dst) {
+ Eterm src = $Src;
+
+ if (is_small(src)) {
+ $Dst = (double) signed_val(src);
+ } else if (is_big(src)) {
+ if (big_to_double(src, &$Dst) < 0) {
+ $BADARITH0();
+ }
+ } else if (is_float(src)) {
+ $LOAD_DOUBLE(src, $Dst);
+ } else {
+ $BADARITH0();
+ }
+}
+
+FLOAT_OP(Src1, OP, Src2, Dst) {
+ ERTS_NO_FPE_CHECK_INIT(c_p);
+ $Dst = $Src1 $OP $Src2;
+ ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0());
+}
+
+i_fadd(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, +, $Src2, $Dst);
+}
+
+i_fsub(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, -, $Src2, $Dst);
+}
+
+i_fmul(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, *, $Src2, $Dst);
+}
+
+i_fdiv(Src1, Src2, Dst) {
+ $FLOAT_OP($Src1, /, $Src2, $Dst);
+}
+
+i_fnegate(Src, Dst) {
+ ERTS_NO_FPE_CHECK_INIT(c_p);
+ $Dst = -$Src;
+ ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0());
+}
+
+%unless NO_FPE_SIGNALS
+fclearerror() {
+ ERTS_FP_CHECK_INIT(c_p);
+}
+
+i_fcheckerror() {
+ ERTS_FP_ERROR(c_p, freg[0].fd, $BADARITH0());
+}
+%endif
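On builds without FPE signals, the ERTS_NO_FPE_CHECK_INIT/ERTS_NO_FPE_ERROR pair amounts to validating the result after the fact rather than trapping on a floating-point exception. A rough C equivalent of one FLOAT_OP instantiation under that assumption (the real macros are more involved):

    #include <math.h>

    /* Sketch of i_fadd without FPE signals: do the arithmetic, then
     * treat any non-finite result (inf/NaN) as badarith. */
    static int i_fadd_sketch(double a, double b, double *dst)
    {
        *dst = a + b;
        return isfinite(*dst) ? 0 : -1;   /* -1 -> raise badarith */
    }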
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b2c76aa605..0631404599 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,9 +42,16 @@
#include "erl_utils.h"
#include "erl_port.h"
#include "erl_gc.h"
+#include "erl_nif.h"
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
struct enif_func_t;
+#ifdef DEBUG
+# define ERTS_NIF_ASSERT_IN_ENV
+#endif
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -57,27 +64,75 @@ struct enif_environment_t /* ErlNifEnv */
int exception_thrown; /* boolean */
Process *tracee;
int exiting; /* boolean (dirty nifs might return in exiting state) */
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ int dbg_disable_assert_in_env;
+#endif
};
+struct enif_resource_type_t
+{
+ struct enif_resource_type_t* next; /* list of all resource types */
+ struct enif_resource_type_t* prev;
+ struct erl_module_nif* owner; /* that created this type and thus implements the destructor*/
+ ErlNifResourceDtor* dtor; /* user destructor function */
+ ErlNifResourceStop* stop;
+ ErlNifResourceDown* down;
+ erts_refc_t refc; /* num of resources of this type (HOTSPOT warning)
+ +1 for active erl_module_nif */
+ Eterm module;
+ Eterm name;
+};
+
+typedef struct
+{
+ erts_mtx_t lock;
+ ErtsMonitor* root;
+ Uint refc;
+ size_t user_data_sz;
+} ErtsResourceMonitors;
+
+typedef struct ErtsResource_
+{
+ struct enif_resource_type_t* type;
+ ErtsResourceMonitors* monitors;
+#ifdef DEBUG
+ erts_refc_t nif_refc;
+#else
+# ifdef ARCH_32
+ byte align__[4];
+# endif
+#endif
+ char data[1];
+}ErtsResource;
+
+#define DATA_TO_RESOURCE(PTR) ErtsContainerStruct(PTR, ErtsResource, data)
+#define erts_resource_ref_size(P) ERTS_MAGIC_REF_THING_SIZE
+
+extern Eterm erts_bld_resource_ref(Eterm** hp, ErlOffHeap*, ErtsResource*);
+
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern void erts_pre_dirty_nif(ErtsSchedulerData *,
- struct enif_environment_t*, Process*,
- struct erl_module_nif*);
-extern void erts_post_dirty_nif(struct enif_environment_t* env);
-#endif
+extern void erts_resource_stop(ErtsResource*, ErlNifEvent, int is_direct_call);
+void erts_fire_nif_monitor(ErtsMonitor *tmon);
+void erts_nif_demonitored(ErtsResource* resource);
+extern void erts_add_taint(Eterm mod_atom);
extern Eterm erts_nif_taints(Process* p);
-extern void erts_print_nif_taints(int to, void* to_arg);
+extern void erts_print_nif_taints(fmtfn_t to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
extern int erts_nif_get_funcs(struct erl_module_nif*,
struct enif_func_t **funcs);
+extern Module *erts_nif_get_module(struct erl_module_nif*);
extern Eterm erts_nif_call_function(Process *p, Process *tracee,
struct erl_module_nif*,
struct enif_func_t *,
int argc, Eterm *argv);
+int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
+ BeamInstr *I, Eterm *reg);
+
+
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
#define ERL_DE_UNLOAD 1
@@ -119,7 +174,7 @@ typedef struct de_proc_entry {
PROC_AWAIT_LOAD == Wants to be notified when we
reloaded the driver (old was locked) */
Uint flags; /* ERL_FL_DE_DEREFERENCED when reload in progress */
- Eterm heap[REF_THING_SIZE]; /* "ref heap" */
+ Eterm heap[ERTS_REF_THING_SIZE]; /* "ref heap" */
struct de_proc_entry *next;
} DE_ProcEntry;
@@ -129,7 +184,7 @@ typedef struct {
or that wait for it to change state */
erts_refc_t refc; /* Number of ports/processes having
references to the driver */
- erts_smp_atomic32_t port_count; /* Number of ports using the driver */
+ erts_atomic32_t port_count; /* Number of ports using the driver */
Uint flags; /* ERL_DE_FL_KILL_PORTS */
int status; /* ERL_DE_xxx */
char *full_path; /* Full path of the driver */
@@ -146,6 +201,7 @@ typedef struct {
struct erts_driver_t_ {
erts_driver_t *next;
erts_driver_t *prev;
+ Eterm name_atom;
char *name;
struct {
int major;
@@ -153,9 +209,7 @@ struct erts_driver_t_ {
} version;
int flags;
DE_Handle *handle;
-#ifdef ERTS_SMP
- erts_smp_mtx_t *lock;
-#endif
+ erts_mtx_t *lock;
ErlDrvEntry *entry;
ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts);
void (*stop)(ErlDrvData drv_data);
@@ -170,8 +224,6 @@ struct erts_driver_t_ {
char *buf, ErlDrvSizeT len,
char **rbuf, ErlDrvSizeT rlen, /* Might be NULL */
unsigned int *flags);
- void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
- ErlDrvEventData event_data);
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event);
void (*timeout)(ErlDrvData drv_data);
@@ -182,7 +234,7 @@ struct erts_driver_t_ {
};
extern erts_driver_t *driver_list;
-extern erts_smp_rwmtx_t erts_driver_list_lock;
+extern erts_rwmtx_t erts_driver_list_lock;
extern void erts_ddll_init(void);
extern void erts_ddll_lock_driver(DE_Handle *dh, char *name);
@@ -207,118 +259,6 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
ErtsProcLocks plocks);
/*
-** Just like the driver binary but with initial flags
-** Note that the two structures Binary and ErlDrvBinary HAVE to
-** be equal except for extra fields in the beginning of the struct.
-** ErlDrvBinary is defined in erl_driver.h.
-** When driver_alloc_binary is called, a Binary is allocated, but
-** the pointer returned is to the address of the first element that
-** also occurs in the ErlDrvBinary struct (driver.*binary takes care if this).
-** The driver need never know about additions to the internal Binary of the
-** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
-** and Binary, the macros below can convert one type to the other, as they both
-** in reality are equal.
-*/
-
-#ifdef ARCH_32
- /* *DO NOT USE* only for alignment. */
-#define ERTS_BINARY_STRUCT_ALIGNMENT Uint32 align__;
-#else
-#define ERTS_BINARY_STRUCT_ALIGNMENT
-#endif
-
-/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
-#define ERTS_INTERNAL_BINARY_FIELDS \
- UWord flags; \
- erts_refc_t refc; \
- ERTS_BINARY_STRUCT_ALIGNMENT
-
-typedef struct binary {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- char orig_bytes[1]; /* to be continued */
-} Binary;
-
-#define ERTS_SIZEOF_Binary(Sz) \
- (offsetof(Binary,orig_bytes) + (Sz))
-
-typedef struct {
- ERTS_INTERNAL_BINARY_FIELDS
- SWord orig_size;
- void (*destructor)(Binary *);
- union {
- struct {
- ERTS_BINARY_STRUCT_ALIGNMENT
- char data[1];
- } aligned;
- struct {
- char data[1];
- } unaligned;
- } u;
-} ErtsMagicBinary;
-
-#ifdef ARCH_32
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 4
-#else
-#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 0
-#endif
-
-typedef union {
- Binary binary;
- ErtsMagicBinary magic_binary;
- struct {
- ERTS_INTERNAL_BINARY_FIELDS
- ErlDrvBinary binary;
- } driver;
-} ErtsBinary;
-
-/*
- * 'Binary' alignment:
- * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
- * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
- * 32-bits architectures and 8 bytes on 64-bits architectures.
- */
-
-#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
- ((ErtsBinary *) (BP))->magic_binary.destructor
-#define ERTS_MAGIC_BIN_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
-#define ERTS_MAGIC_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
-
-/* On 32-bit arch these macro variants will save memory
- by not forcing 8-byte alignment for the magic payload.
-*/
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
-#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
- (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
-#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
- ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
-#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
- (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
-#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
-#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
- ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
-
-
-#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
-#define ErlDrvBinary2Binary(D) ((Binary *) \
- (((char *) (D)) \
- - offsetof(ErtsBinary, driver.binary)))
-
-/* A "magic" binary flag */
-#define BIN_FLAG_MAGIC 1
-#define BIN_FLAG_USR1 2 /* Reserved for use by different modules too mark */
-#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
-#define BIN_FLAG_DRV 8
-
-/*
* This structure represents one type of a binary in a process.
*/
@@ -339,46 +279,12 @@ typedef struct proc_bin {
*/
#define PROC_BIN_SIZE (sizeof(ProcBin)/sizeof(Eterm))
-ERTS_GLB_INLINE Eterm erts_mk_magic_binary_term(Eterm **hpp,
- ErlOffHeap *ohp,
- Binary *mbp);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
-{
- ProcBin *pb = (ProcBin *) *hpp;
- *hpp += PROC_BIN_SIZE;
-
- ASSERT(mbp->flags & BIN_FLAG_MAGIC);
-
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = 0;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*) pb;
- pb->val = mbp;
- pb->bytes = (byte *) mbp->orig_bytes;
- pb->flags = 0;
-
- erts_refc_inc(&mbp->refc, 1);
-
- return make_binary(pb);
-}
-
-#endif
-
-#define ERTS_TERM_IS_MAGIC_BINARY(T) \
- (is_binary((T)) \
- && (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
- && (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
-
-
union erl_off_heap_ptr {
struct erl_off_heap_header* hdr;
ProcBin *pb;
struct erl_fun_thing* fun;
struct external_thing_* ext;
+ ErtsMRefThing *mref;
Eterm* ep;
void* voidp;
};
@@ -389,7 +295,7 @@ extern Eterm node_cookie;
extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
-extern erts_smp_atomic32_t erts_max_gen_gcs;
+extern erts_atomic32_t erts_max_gen_gcs;
extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */
extern int stackdump_on_exit;
@@ -464,7 +370,7 @@ do {\
UWord _wsz = ESTACK_COUNT(s);\
(dst)->start = erts_alloc((s).alloc_type,\
DEF_ESTACK_SIZE * sizeof(Eterm));\
- memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\
+ sys_memcpy((dst)->start, (s).start,_wsz*sizeof(Eterm));\
(dst)->sp = (dst)->start + _wsz;\
(dst)->end = (dst)->start + DEF_ESTACK_SIZE;\
(dst)->edefault = NULL;\
@@ -632,7 +538,7 @@ do {\
UWord _wsz = WSTACK_COUNT(s);\
(dst)->wstart = erts_alloc(s.alloc_type,\
DEF_WSTACK_SIZE * sizeof(UWord));\
- memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
+ sys_memcpy((dst)->wstart, s.wstart,_wsz*sizeof(UWord));\
(dst)->wsp = (dst)->wstart + _wsz;\
(dst)->wend = (dst)->wstart + DEF_WSTACK_SIZE;\
(dst)->wdefault = NULL;\
@@ -773,8 +679,8 @@ do { \
typedef struct ErtsPStack_ {
byte* pstart;
- byte* psp;
- byte* pend;
+ int offs; /* "stack pointer" as byte offset from pstart */
+ int size; /* allocated size in bytes */
ErtsAlcType_t alloc_type;
}ErtsPStack;
@@ -785,8 +691,8 @@ void erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes);
#define PSTACK_DECLARE(s, DEF_PSTACK_SIZE) \
PSTACK_TYPE PSTK_DEF_STACK(s)[DEF_PSTACK_SIZE]; \
ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
- (byte*)(PSTK_DEF_STACK(s) - 1), /* psp */ \
- (byte*)(PSTK_DEF_STACK(s) + (DEF_PSTACK_SIZE)), /* pend */\
+ -(int)sizeof(PSTACK_TYPE), /* offs */ \
+ DEF_PSTACK_SIZE*sizeof(PSTACK_TYPE), /* size */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
@@ -806,19 +712,21 @@ do { \
} \
} while(0)
-#define PSTACK_IS_EMPTY(s) (s.psp < s.pstart)
+#define PSTACK_IS_EMPTY(s) (s.offs < 0)
-#define PSTACK_COUNT(s) (((PSTACK_TYPE*)s.psp + 1) - (PSTACK_TYPE*)s.pstart)
+#define PSTACK_COUNT(s) ((s.offs + sizeof(PSTACK_TYPE)) / sizeof(PSTACK_TYPE))
-#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp))
+#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
-#define PSTACK_PUSH(s) \
- (s.psp += sizeof(PSTACK_TYPE), \
- ((s.psp == s.pend) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
- sizeof(PSTACK_TYPE)) : (void)0), \
- ((PSTACK_TYPE*) s.psp))
+#define PSTACK_PUSH(s) \
+ (s.offs += sizeof(PSTACK_TYPE), \
+ ((s.offs == s.size) ? erl_grow_pstack(&s, PSTK_DEF_STACK(s), \
+ sizeof(PSTACK_TYPE)) : (void)0), \
+ ((PSTACK_TYPE*) (s.pstart + s.offs)))
-#define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE)))
+#define PSTACK_POP(s) ((s.offs -= sizeof(PSTACK_TYPE)), \
+ (PSTACK_TYPE*)(s.pstart + s.offs))
/*
* Do not free the stack after this, it may have pointers into what
@@ -831,8 +739,8 @@ do {\
(dst)->pstart = erts_alloc(s.alloc_type,\
sizeof(PSTK_DEF_STACK(s)));\
sys_memcpy((dst)->pstart, s.pstart, _pbytes);\
- (dst)->psp = (dst)->pstart + _pbytes - sizeof(PSTACK_TYPE);\
- (dst)->pend = (dst)->pstart + sizeof(PSTK_DEF_STACK(s));\
+ (dst)->offs = s.offs;\
+ (dst)->size = s.size;\
(dst)->alloc_type = s.alloc_type;\
} else\
*(dst) = s;\
@@ -847,8 +755,8 @@ do { \
ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \
s = *(src); /* struct copy */ \
(src)->pstart = NULL; \
- ASSERT(s.psp >= (s.pstart - sizeof(PSTACK_TYPE))); \
- ASSERT(s.psp < s.pend); \
+ ASSERT(s.offs >= -(int)sizeof(PSTACK_TYPE)); \
+ ASSERT(s.offs < s.size); \
} while (0)
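The PSTACK rework replaces the raw psp/pend pointers with a byte offset and a size, so growing the stack with a reallocation can move the buffer without invalidating the saved position. A standalone sketch of the same representation (simplified: no default stack, no ERTS allocator types, and error handling omitted):

    #include <stdlib.h>

    typedef struct {
        char *pstart;   /* base of the buffer */
        int   offs;     /* top element as byte offset; < 0 when empty */
        int   size;     /* allocated size in bytes */
    } PStackSketch;

    static void pstack_init(PStackSketch *s, int elem_size, int nelems)
    {
        s->size = elem_size * nelems;
        s->pstart = malloc(s->size);
        s->offs = -elem_size;          /* matches PSTACK_IS_EMPTY: offs < 0 */
    }

    static void *pstack_push(PStackSketch *s, int elem_size)
    {
        s->offs += elem_size;
        if (s->offs == s->size) {      /* full: grow; the buffer may move */
            s->size *= 2;
            s->pstart = realloc(s->pstart, s->size);
        }
        return s->pstart + s->offs;    /* the offset survives the realloc */
    }

    static void *pstack_pop(PStackSketch *s, int elem_size)
    {
        s->offs -= elem_size;          /* like PSTACK_POP above */
        return s->pstart + s->offs;
    }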
#define PSTACK_DESTROY_SAVED(pstack)\
@@ -954,7 +862,10 @@ void erts_emasculate_writable_binary(ProcBin* pb);
Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
Eterm erts_new_mso_binary(Process*, byte*, Uint);
Eterm new_binary(Process*, byte*, Uint);
+Eterm erts_heap_factory_new_binary(ErtsHeapFactory *hfact, byte *buf,
+ Uint len, Uint reserve_size);
Eterm erts_realloc_binary(Eterm bin, size_t size);
+Eterm erts_build_proc_bin(ErlOffHeap*, Eterm*, Binary*);
/* erl_bif_info.c */
@@ -969,21 +880,6 @@ void erts_bif_info_init(void);
/* bif.c */
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Eterm
-erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
-{
- Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
- write_ref_thing(hp, ref[0], ref[1], ref[2]);
- return make_internal_ref(hp);
-}
-
-#endif
-
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
@@ -991,21 +887,18 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
- Eterm (*bif)(Process*,Eterm*));
+ Eterm (*bif)(Process*, Eterm*, BeamInstr*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
+int erts_set_group_leader(Process *proc, Eterm new_gl);
/* erl_bif_op.c */
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* beam_bif_load.c */
-#define ERTS_CPC_ALLOW_GC (1 << 0)
-#define ERTS_CPC_ALL ERTS_CPC_ALLOW_GC
-Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls);
-#ifdef ERTS_NEW_PURGE_STRATEGY
+Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls);
Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed);
-#endif
typedef struct ErtsLiteralArea_ {
struct erl_off_heap_header *off_heap;
@@ -1013,25 +906,21 @@ typedef struct ErtsLiteralArea_ {
Eterm start[1]; /* beginning of area */
} ErtsLiteralArea;
+void erts_queue_release_literals(Process *c_p, ErtsLiteralArea* literals);
+
#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \
(sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1))
-extern erts_smp_atomic_t erts_copy_literal_area__;
+extern erts_atomic_t erts_copy_literal_area__;
#define ERTS_COPY_LITERAL_AREA() \
- ((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__))
-
-#ifdef ERTS_NEW_PURGE_STRATEGY
+ ((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__))
extern Process *erts_literal_area_collector;
-#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
-extern Process *erts_dirty_process_code_checker;
-#endif
extern Process *erts_code_purger;
/* beam_load.c */
typedef struct {
- BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ ErtsCodeMFA* mfa; /* Pointer to: Mod, Name, Arity */
Uint needed; /* Heap space needed for entire tuple */
Uint32 loc; /* Location in source code */
Eterm* fname_ptr; /* Pointer to fname table */
@@ -1048,13 +937,14 @@ Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
-BeamInstr* find_function_from_pc(BeamInstr* pc);
+ErtsCodeMFA* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
+void erts_set_current_function(FunctionInfo* fi, ErtsCodeMFA* mfa);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+int erts_commit_hipe_patch_load(Eterm hipe_magic_bin);
/* beam_ranges.c */
void erts_init_ranges(void);
@@ -1064,21 +954,28 @@ void erts_update_ranges(BeamInstr* code, Uint size);
void erts_remove_from_ranges(BeamInstr* code);
UWord erts_ranges_sz(void);
void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+extern ErtsLiteralArea** erts_dump_lit_areas;
+extern Uint erts_dump_num_lit_areas;
/* break.c */
void init_break_handler(void);
void erts_set_ignore_break(void);
void erts_replace_intr(void);
-void process_info(int, void *);
-void print_process_info(int, void *, Process*);
-void info(int, void *);
-void loaded(int, void *);
+void process_info(fmtfn_t, void *);
+void print_process_info(fmtfn_t, void *, Process*);
+void info(fmtfn_t, void *);
+void loaded(fmtfn_t, void *);
+void erts_print_base64(fmtfn_t to, void *to_arg, byte* src, Uint size);
+
+/* sighandler sys.c */
+int erts_set_signal(Eterm signal, Eterm type);
/* erl_arith.c */
double erts_get_positive_zero_float(void);
/* config.c */
+__decl_noreturn void __noreturn erts_exit_epilogue(void);
__decl_noreturn void __noreturn erts_exit(int n, char*, ...);
__decl_noreturn void __noreturn erts_flush_async_exit(int n, char*, ...);
void erl_error(char*, va_list);
@@ -1104,26 +1001,28 @@ typedef struct {
Eterm* shtable_start;
ErtsAlcType_t shtable_alloc_type;
Uint literal_size;
- Eterm *range_ptr;
- Uint range_sz;
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+ int copy_literals;
} erts_shcopy_t;
-#define INITIALIZE_SHCOPY(info) \
-do { \
- ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA();\
- info.queue_start = info.queue_default; \
- info.bitstore_start = info.bitstore_default; \
- info.shtable_start = info.shtable_default; \
- info.literal_size = 0; \
- if (larea__) { \
- info.range_ptr = &larea__->start[0]; \
- info.range_sz = larea__->end - info.range_ptr; \
- } \
- else { \
- info.range_ptr = NULL; \
- info.range_sz = 0; \
- } \
-} while(0)
+#define INITIALIZE_SHCOPY(info) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ info.queue_start = info.queue_default; \
+ info.bitstore_start = info.bitstore_default; \
+ info.shtable_start = info.shtable_default; \
+ info.literal_size = 0; \
+ info.copy_literals = 0; \
+ if (larea__) { \
+ info.lit_purge_ptr = &larea__->start[0]; \
+ info.lit_purge_sz = larea__->end - info.lit_purge_ptr; \
+ } \
+ else { \
+ info.lit_purge_ptr = NULL; \
+ info.lit_purge_sz = 0; \
+ } \
+ } while(0)
#define DESTROY_SHCOPY(info) \
do { \
@@ -1139,32 +1038,56 @@ do { \
} while(0)
/* copy.c */
+typedef struct {
+ Eterm *lit_purge_ptr;
+ Uint lit_purge_sz;
+} erts_literal_area_t;
+
+#define INITIALIZE_LITERAL_PURGE_AREA(Area) \
+ do { \
+ ErtsLiteralArea *larea__ = ERTS_COPY_LITERAL_AREA(); \
+ if (larea__) { \
+ (Area).lit_purge_ptr = &larea__->start[0]; \
+ (Area).lit_purge_sz = larea__->end - (Area).lit_purge_ptr; \
+ } \
+ else { \
+ (Area).lit_purge_ptr = NULL; \
+ (Area).lit_purge_sz = 0; \
+ } \
+ } while(0)
+
Eterm copy_object_x(Eterm, Process*, Uint);
#define copy_object(Term, Proc) copy_object_x(Term,Proc,0)
-Uint size_object(Eterm);
+Uint size_object_x(Eterm, erts_literal_area_t*);
+#define size_object(Term) size_object_x(Term,NULL)
+#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea)
+
Uint copy_shared_calculate(Eterm, erts_shcopy_t*);
Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*);
Uint size_shared(Eterm);
-Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz);
+Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*);
#define copy_struct(Obj,Sz,HPP,OH) \
- copy_struct_x(Obj,Sz,HPP,OH,NULL)
-Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL)
+#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \
+ copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea)
+
+Eterm copy_shallow(Eterm* ERTS_RESTRICT, Uint, Eterm**, ErlOffHeap*);
void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
Eterm* refs, unsigned nrefs, int literals);
/* Utilities */
-extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks);
+void erts_monitor_nodes_delete(ErtsMonitor *);
extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm);
extern Eterm erts_processes_monitoring_nodes(Process *);
extern int erts_do_net_exits(DistEntry*, Eterm);
-extern int distribution_info(int, void *);
+extern int distribution_info(fmtfn_t, void *);
extern int is_node_name_atom(Eterm a);
-extern int erts_net_message(Port *, DistEntry *,
+extern int erts_net_message(Port *, DistEntry *, Uint32 conn_id,
byte *, ErlDrvSizeT, byte *, ErlDrvSizeT);
extern void init_dist(void);
@@ -1179,7 +1102,7 @@ void print_pass_through(int, byte*, int);
/* beam_emu.c */
int catchlevel(Process*);
void init_emulator(void);
-void process_main(void);
+void process_main(Eterm* x_reg_array, FloatDef* f_reg_array);
void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
@@ -1190,7 +1113,6 @@ void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
typedef struct {
Eterm delay_time;
int context_reds;
- int input_reds;
} ErtsModifiedTimings;
extern Export *erts_delay_trap;
@@ -1208,15 +1130,12 @@ extern ErtsModifiedTimings erts_modified_timings[];
extern int erts_no_line_info;
extern Eterm erts_error_logger_warnings;
extern int erts_initialized;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-extern erts_tid_t erts_main_thread;
-#endif
extern int erts_compat_rel;
-extern int erts_use_sender_punish;
-void erts_short_init(void);
void erl_start(int, char**);
void erts_usage(void);
Eterm erts_preloaded(Process* p);
+
+
/* erl_md5.c */
typedef struct {
@@ -1240,7 +1159,7 @@ typedef struct {
#define ERTS_SPAWN_DRIVER 1
#define ERTS_SPAWN_EXECUTABLE 2
#define ERTS_SPAWN_ANY (ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXECUTABLE)
-int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked);
+int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked, int taint);
void erts_destroy_driver(erts_driver_t *drv);
int erts_save_suspend_process_on_port(Port*, Process*);
Port *erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *, int *);
@@ -1256,14 +1175,26 @@ void erts_stale_drv_select(Eterm, ErlDrvPort, ErlDrvEvent, int, int);
Port *erts_get_heart_port(void);
void erts_emergency_close_ports(void);
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
+Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_io_lock_count(int enable);
+#if defined(ERTS_ENABLE_LOCK_COUNT)
+void erts_lcnt_update_driver_locks(int enable);
+void erts_lcnt_update_port_locks(int enable);
#endif
/* driver_tab.c */
+typedef struct {
+ ErlDrvEntry* de;
+ int taint;
+} ErtsStaticDriver;
typedef void *(*ErtsStaticNifInitFPtr)(void);
-ErtsStaticNifInitFPtr erts_static_nif_get_nif_init(const char *name, int len);
+typedef struct ErtsStaticNifEntry_ {
+ const char *nif_name;
+ ErtsStaticNifInitFPtr nif_init;
+ int taint;
+} ErtsStaticNifEntry;
+ErtsStaticNifEntry* erts_static_nif_get_nif_init(const char *name, int len);
int erts_is_static_nif(void *handle);
void erts_init_static_drivers(void);
@@ -1277,6 +1208,11 @@ Uint64 erts_timestamp_millis(void);
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
+void *erts_calc_stacklimit(char *prev_c, UWord stacksize);
+int erts_check_below_limit(char *ptr, char *limit);
+int erts_check_above_limit(char *ptr, char *limit);
+void *erts_ptr_id(void *ptr);
+
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -1306,12 +1242,24 @@ Sint erts_re_set_loop_limit(Sint limit);
void erts_init_bif_binary(void);
Sint erts_binary_set_loop_limit(Sint limit);
+/* erl_bif_persistent.c */
+void erts_init_bif_persistent_term(void);
+Uint erts_persistent_term_count(void);
+void erts_init_persistent_dumping(void);
+extern ErtsLiteralArea** erts_persistent_areas;
+extern Uint erts_num_persistent_areas;
+
/* external.c */
void erts_init_external(void);
/* erl_map.c */
void erts_init_map(void);
+/* beam_debug.c */
+UWord erts_check_stack_recursion_downwards(char *start_c);
+UWord erts_check_stack_recursion_upwards(char *start_c);
+int erts_is_above_stack_limit(char *ptr);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
@@ -1340,7 +1288,7 @@ char* erts_convert_filename_to_wchar(byte* bytes, Uint size,
char *statbuf, size_t statbuf_size,
ErtsAlcType_t alloc_type, Sint* used,
Uint extra_wchars);
-Eterm erts_convert_native_to_filename(Process *p, byte *bytes);
+Eterm erts_convert_native_to_filename(Process *p, size_t size, byte *bytes);
Eterm erts_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz, Uint left,
Uint *num_built, Uint *num_eaten, Eterm tail);
int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
@@ -1350,8 +1298,10 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen);
#define ERTS_UTF8_ANALYZE_MORE 3
#define ERTS_UTF8_OK_MAX_CHARS 4
-void bin_write(int, void*, byte*, size_t);
+void bin_write(fmtfn_t, void*, byte*, size_t);
Sint intlist_to_buf(Eterm, char*, Sint); /* most callers pass plain char*'s */
+int erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written);
+Sint erts_unicode_list_to_buf_len(Eterm list);
struct Sint_buf {
#if defined(ARCH_64)
@@ -1452,23 +1402,11 @@ Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live);
Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live);
-Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live);
-
Uint erts_current_reductions(Process* current, Process *p);
-int erts_print_system_version(int to, void *arg, Process *c_p);
+int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p);
-int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg);
+int erts_hibernate(Process* c_p, Eterm* reg);
ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr);
@@ -1500,21 +1438,20 @@ Eterm erts_msacc_request(Process *c_p, int action, Eterm *threads);
#define MatchSetRef(MPSP) \
do { \
if ((MPSP) != NULL) { \
- erts_refc_inc(&(MPSP)->refc, 1); \
+ erts_refc_inc(&(MPSP)->intern.refc, 1); \
} \
} while (0)
#define MatchSetUnref(MPSP) \
do { \
- if (((MPSP) != NULL) && erts_refc_dectest(&(MPSP)->refc, 0) <= 0) { \
- erts_bin_free(MPSP); \
+ if (((MPSP) != NULL)) { \
+ erts_bin_release(MPSP); \
} \
} while(0)
#define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP)
extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA);
-Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
extern void erts_match_set_release_result(Process* p);
ERTS_GLB_INLINE void erts_match_set_release_result_trace(Process* p, Eterm);
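
The rewritten MatchSetUnref above delegates the whole decrement-test-free sequence to erts_bin_release(). As a self-contained C11 sketch of the idiom that call encapsulates (types and names are illustrative, not the actual ERTS implementation):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        atomic_long refc;            /* plays the role of intern.refc */
        /* ... payload ... */
    } RefcBinary;

    /* Equivalent of the old open-coded MatchSetUnref body: decrement,
     * test for zero, and free on the last reference. */
    static void refc_release(RefcBinary *bp)
    {
        if (atomic_fetch_sub_explicit(&bp->refc, 1,
                                      memory_order_release) == 1) {
            /* Last owner: synchronize with the other releasers'
             * writes before tearing the object down. */
            atomic_thread_fence(memory_order_acquire);
            free(bp);
        }
    }

Centralizing the release in one function keeps every call site race-free by construction instead of repeating the dectest-and-free pattern at each use, which is presumably the point of the MatchSetUnref change above.
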
@@ -1572,8 +1509,7 @@ int erts_beam_jump_table(void);
ERTS_GLB_INLINE void dtrace_pid_str(Eterm pid, char *process_buf);
ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf);
ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf);
-ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+ERTS_GLB_INLINE void dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1607,8 +1543,7 @@ dtrace_port_str(Port *port, char *port_buf)
}
ERTS_GLB_INLINE void
-dtrace_fun_decode(Process *process,
- Eterm module, Eterm function, int arity,
+dtrace_fun_decode(Process *process, ErtsCodeMFA *mfa,
char *process_buf, char *mfa_buf)
{
if (process_buf) {
@@ -1616,7 +1551,7 @@ dtrace_fun_decode(Process *process,
}
erts_snprintf(mfa_buf, DTRACE_TERM_BUF_SIZE, "%T:%T/%d",
- module, function, arity);
+ mfa->module, mfa->function, mfa->arity);
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c
index cd038d100b..8954dbb06c 100644
--- a/erts/emulator/beam/hash.c
+++ b/erts/emulator/beam/hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -95,7 +95,7 @@ void hash_get_info(HashInfo *hi, Hash *h)
**
*/
-void hash_info(int to, void *arg, Hash* h)
+void hash_info(fmtfn_t to, void *arg, Hash* h)
{
HashInfo hi;
@@ -152,7 +152,7 @@ Hash* hash_init(int type, Hash* h, char* name, int size, HashFunctions fun)
h->bucket = (HashBucket**) fun.meta_alloc(h->meta_alloc_type, sz);
- sys_memzero(h->bucket, sz);
+ memzero(h->bucket, sz);
h->is_allocated = 0;
h->name = name;
h->fun = fun;
@@ -224,7 +224,7 @@ static void rehash(Hash* h, int grow)
sz = h->size*sizeof(HashBucket*);
new_bucket = (HashBucket **) h->fun.meta_alloc(h->meta_alloc_type, sz);
- sys_memzero(new_bucket, sz);
+ memzero(new_bucket, sz);
for (i = 0; i < old_size; i++) {
HashBucket* b = h->bucket[i];
diff --git a/erts/emulator/beam/hash.h b/erts/emulator/beam/hash.h
index 4e769c0119..d319aaca83 100644
--- a/erts/emulator/beam/hash.h
+++ b/erts/emulator/beam/hash.h
@@ -37,7 +37,7 @@ typedef void (*HFREE_FUN)(void*);
/* Meta functions */
typedef void* (*HMALLOC_FUN)(int,size_t);
typedef void (*HMFREE_FUN)(int,void*);
-typedef int (*HMPRINT_FUN)(int,void*,char*, ...);
+typedef int (*HMPRINT_FUN)(fmtfn_t,void*,char*, ...);
/*
** This bucket must be placed in top of
@@ -89,7 +89,7 @@ Hash* hash_init(int, Hash*, char*, int, HashFunctions);
void hash_delete(Hash*);
void hash_get_info(HashInfo*, Hash*);
-void hash_info(int, void *, Hash*);
+void hash_info(fmtfn_t, void *, Hash*);
int hash_table_sz(Hash *);
void* hash_get(Hash*, void*);
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index 26d6c04ea0..be1771b037 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@
#include "global.h"
#include "index.h"
-void index_info(int to, void *arg, IndexTable *t)
+void index_info(fmtfn_t to, void *arg, IndexTable *t)
{
hash_info(to, arg, &t->htable);
erts_print(to, arg, "=index_table:%s\n", t->htable.name);
@@ -58,7 +58,7 @@ IndexTable*
erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name,
int size, int limit, HashFunctions fun)
{
- Uint base_size = ((limit+INDEX_PAGE_SIZE-1)/INDEX_PAGE_SIZE)*sizeof(IndexSlot*);
+ Uint base_size = (((Uint)limit+INDEX_PAGE_SIZE-1)/INDEX_PAGE_SIZE)*sizeof(IndexSlot*);
hash_init(type, &t->htable, name, 3*size/4, fun);
t->size = 0;
@@ -91,9 +91,16 @@ index_put_entry(IndexTable* t, void* tmpl)
t->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(t->type, sz);
t->size += INDEX_PAGE_SIZE;
}
- t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+
+ /*
+     * Do a write barrier here to allow readers to do lock-free iteration.
+     * erts_index_num_entries() does the matching read barrier.
+ */
+ ERTS_THR_WRITE_MEMORY_BARRIER;
+ t->entries++;
+
return p;
}
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index 0a109d8699..30bc6a1121 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -51,7 +51,7 @@ typedef struct index_table
#define INDEX_PAGE_MASK ((1 << INDEX_PAGE_SHIFT)-1)
IndexTable *erts_index_init(ErtsAlcType_t,IndexTable*,char*,int,int,HashFunctions);
-void index_info(int, void *, IndexTable*);
+void index_info(fmtfn_t, void *, IndexTable*);
int index_table_sz(IndexTable *);
int index_get(IndexTable*, void*);
@@ -65,6 +65,7 @@ void index_erase_latest_from(IndexTable*, Uint ix);
ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -78,6 +79,19 @@ erts_index_lookup(IndexTable* t, Uint ix)
{
return t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
}
+
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t)
+{
+ int ret = t->entries;
+ /*
+     * Do a read barrier here to allow lock-free iteration
+     * on tables where entries are never erased.
+     * index_put_entry() does the matching write barrier.
+ */
+ ERTS_THR_READ_MEMORY_BARRIER;
+ return ret;
+}
+
#endif
#endif
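
The two barriers above pair up into the usual single-writer publish protocol: index_put_entry() makes the new slot fully visible before it bumps the counter, and erts_index_num_entries() reads the counter before any caller walks the slots it covers. A minimal self-contained C11 sketch of the same ordering (names and the <stdatomic.h> usage are illustrative; the emulator uses its ERTS_THR_* barrier macros on plain fields, and insertions are assumed serialized here, just as index_put_entry normally runs under the table's lock):

    #include <stdatomic.h>

    #define NSLOTS 1024

    static void *slot[NSLOTS];     /* published entries            */
    static atomic_int nentries;    /* how many slots are published */

    /* Writer (cf. index_put_entry): fill the slot, then publish it. */
    static void publish(void *p)
    {
        int ix = atomic_load_explicit(&nentries, memory_order_relaxed);
        slot[ix] = p;
        /* Release = write barrier: the slot store cannot be reordered
         * past the counter store that makes it visible to readers.  */
        atomic_store_explicit(&nentries, ix + 1, memory_order_release);
    }

    /* Reader (cf. erts_index_num_entries): count first, then walk.
     * `out` must have room for NSLOTS pointers.                     */
    static int snapshot(void **out)
    {
        /* Acquire = read barrier: slot loads cannot be reordered
         * before the counter load, so all n slots are initialized.  */
        int n = atomic_load_explicit(&nentries, memory_order_acquire);
        for (int i = 0; i < n; i++)
            out[i] = slot[i];
        return n;
    }

Lock-free reading is only safe because, as the index.h comment notes, entries are never erased: a reader may trust every index below the count it observed.
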
diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab
new file mode 100644
index 0000000000..42c1168f85
--- /dev/null
+++ b/erts/emulator/beam/instrs.tab
@@ -0,0 +1,973 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017-2018. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// Stack manipulation instructions
+
+allocate(NeedStack, Live) {
+ $AH($NeedStack, 0, $Live);
+}
+
+allocate_heap(NeedStack, NeedHeap, Live) {
+ $AH($NeedStack, $NeedHeap, $Live);
+}
+
+allocate_init(NeedStack, Live, Y) {
+ $AH($NeedStack, 0, $Live);
+ make_blank($Y);
+}
+
+allocate_zero(NeedStack, Live) {
+ Eterm* ptr;
+ int i = $NeedStack;
+ $AH(i, 0, $Live);
+ for (ptr = E + i; ptr > E; ptr--) {
+ make_blank(*ptr);
+ }
+}
+
+allocate_heap_zero(NeedStack, NeedHeap, Live) {
+ Eterm* ptr;
+ int i = $NeedStack;
+ $AH(i, $NeedHeap, $Live);
+ for (ptr = E + i; ptr > E; ptr--) {
+ make_blank(*ptr);
+ }
+}
+
+// This instruction is probably never used (because it is combined with
+// a return). However, a future compiler might for some reason emit a
+// deallocate not followed by a return, and that should work.
+
+deallocate(Deallocate) {
+ //| -no_prefetch
+ SET_CP(c_p, (BeamInstr *) cp_val(*E));
+ E = ADD_BYTE_OFFSET(E, $Deallocate);
+}
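
For context, the fusion that the comment above alludes to is expressed as a loader transformation; in ops.tab style it reads roughly like this (a sketch only; the exact rule in the real ops.tab may differ):

    deallocate N | return => deallocate_return N

which rewrites the common pair into the fused deallocate_return that follows, leaving this standalone form for a compiler that emits a deallocate without an immediate return.
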
+
+deallocate_return(Deallocate) {
+ //| -no_next
+ int words_to_pop = $Deallocate;
+ SET_I((BeamInstr *) cp_val(*E));
+ E = ADD_BYTE_OFFSET(E, words_to_pop);
+ CHECK_TERM(x(0));
+ DispatchReturn;
+}
+
+move_deallocate_return(Src, Deallocate) {
+ x(0) = $Src;
+ $deallocate_return($Deallocate);
+}
+
+// Call instructions
+
+DISPATCH_REL(CallDest) {
+ //| -no_next
+ $SET_I_REL($CallDest);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
+ Dispatch();
+}
+
+DISPATCH_ABS(CallDest) {
+ //| -no_next
+ SET_I((BeamInstr *) $CallDest);
+ DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I));
+ Dispatch();
+}
+
+i_call(CallDest) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_REL($CallDest);
+}
+
+move_call(Src, CallDest) {
+ x(0) = $Src;
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_REL($CallDest);
+}
+
+i_call_last(CallDest, Deallocate) {
+ $deallocate($Deallocate);
+ $DISPATCH_REL($CallDest);
+}
+
+move_call_last(Src, CallDest, Deallocate) {
+ x(0) = $Src;
+ $i_call_last($CallDest, $Deallocate);
+}
+
+i_call_only(CallDest) {
+ $DISPATCH_REL($CallDest);
+}
+
+move_call_only(Src, CallDest) {
+ x(0) = $Src;
+ $i_call_only($CallDest);
+}
+
+DISPATCHX(Dest) {
+ //| -no_next
+ DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, $Dest);
+ // Dispatchx assumes the Export* is in Arg(0)
+ I = (&$Dest) - 1;
+ Dispatchx();
+}
+
+i_call_ext(Dest) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext(Src, Dest) {
+ x(0) = $Src;
+ $i_call_ext($Dest);
+}
+
+i_call_ext_only(Dest) {
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext_only(Dest, Src) {
+ x(0) = $Src;
+ $i_call_ext_only($Dest);
+}
+
+i_call_ext_last(Dest, Deallocate) {
+ $deallocate($Deallocate);
+ $DISPATCHX($Dest);
+}
+
+i_move_call_ext_last(Dest, StackOffset, Src) {
+ x(0) = $Src;
+ $i_call_ext_last($Dest, $StackOffset);
+}
+
+APPLY(I, Deallocate, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = apply(c_p, reg, $I, $Deallocate);
+ HEAVY_SWAPIN;
+}
+
+HANDLE_APPLY_ERROR() {
+ I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa);
+ goto post_error_handling;
+}
+
+i_apply() {
+ BeamInstr *next;
+ $APPLY(NULL, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+i_apply_last(Deallocate) {
+ BeamInstr *next;
+ $APPLY(I, $Deallocate, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+i_apply_only() {
+ BeamInstr *next;
+ $APPLY(I, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+FIXED_APPLY(Arity, I, Deallocate, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = fixed_apply(c_p, reg, $Arity, $I, $Deallocate);
+ HEAVY_SWAPIN;
+}
+
+apply(Arity) {
+ BeamInstr *next;
+ $FIXED_APPLY($Arity, NULL, 0, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+apply_last(Arity, Deallocate) {
+ BeamInstr *next;
+ $FIXED_APPLY($Arity, I, $Deallocate, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_ABS(next);
+ }
+ $HANDLE_APPLY_ERROR();
+}
+
+APPLY_FUN(Next) {
+ HEAVY_SWAPOUT;
+ $Next = apply_fun(c_p, r(0), x(1), reg);
+ HEAVY_SWAPIN;
+}
+
+HANDLE_APPLY_FUN_ERROR() {
+ goto find_func_info;
+}
+
+DISPATCH_FUN(I) {
+ SET_I($I);
+ Dispatchfun();
+}
+
+i_apply_fun() {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_apply_fun_last(Deallocate) {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_apply_fun_only() {
+ BeamInstr *next;
+ $APPLY_FUN(next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+CALL_FUN(Fun, Next) {
+ //| -no_next
+ HEAVY_SWAPOUT;
+ $Next = call_fun(c_p, $Fun, reg, THE_NON_VALUE);
+ HEAVY_SWAPIN;
+}
+
+i_call_fun(Fun) {
+ BeamInstr *next;
+ $CALL_FUN($Fun, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ SET_CP(c_p, $NEXT_INSTRUCTION);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+i_call_fun_last(Fun, Deallocate) {
+ BeamInstr *next;
+ $CALL_FUN($Fun, next);
+ if (ERTS_LIKELY(next != NULL)) {
+ $deallocate($Deallocate);
+ $DISPATCH_FUN(next);
+ }
+ $HANDLE_APPLY_FUN_ERROR();
+}
+
+return() {
+ SET_I(c_p->cp);
+ DTRACE_RETURN_FROM_PC(c_p);
+
+ /*
+     * We must clear the CP to make sure that a stale value does not
+     * create a false module dependency that prevents code upgrading.
+ * It also means that we can use the CP in stack backtraces.
+ */
+ c_p->cp = 0;
+ CHECK_TERM(r(0));
+ HEAP_SPACE_VERIFIED(0);
+ DispatchReturn;
+}
+
+get_list(Src, Hd, Tl) {
+ Eterm* tmp_ptr = list_val($Src);
+ Eterm hd, tl;
+ hd = CAR(tmp_ptr);
+ tl = CDR(tmp_ptr);
+ $Hd = hd;
+ $Tl = tl;
+}
+
+get_hd(Src, Hd) {
+ Eterm* tmp_ptr = list_val($Src);
+ $Hd = CAR(tmp_ptr);
+}
+
+get_tl(Src, Tl) {
+ Eterm* tmp_ptr = list_val($Src);
+ $Tl = CDR(tmp_ptr);
+}
+
+i_get(Src, Dst) {
+ $Dst = erts_pd_hash_get(c_p, $Src);
+}
+
+i_get_hash(Src, Hash, Dst) {
+ $Dst = erts_pd_hash_get_with_hx(c_p, $Hash, $Src);
+}
+
+i_get_tuple_element(Src, Element, Dst) {
+ Eterm* src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ $Dst = *src;
+}
+
+i_get_tuple_element2(Src, Element, Dst) {
+ Eterm* src;
+ Eterm* dst;
+ Eterm E1, E2;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ dst = &($Dst);
+ E1 = src[0];
+ E2 = src[1];
+ dst[0] = E1;
+ dst[1] = E2;
+}
+
+i_get_tuple_element2y(Src, Element, D1, D2) {
+ Eterm* src;
+ Eterm E1, E2;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ E1 = src[0];
+ E2 = src[1];
+ $D1 = E1;
+ $D2 = E2;
+}
+
+i_get_tuple_element3(Src, Element, Dst) {
+ Eterm* src;
+ Eterm* dst;
+ Eterm E1, E2, E3;
+ src = ADD_BYTE_OFFSET(tuple_val($Src), $Element);
+ dst = &($Dst);
+ E1 = src[0];
+ E2 = src[1];
+ E3 = src[2];
+ dst[0] = E1;
+ dst[1] = E2;
+ dst[2] = E3;
+}
+
+i_element := element_group.fetch.execute;
+
+
+element_group.head() {
+ Eterm element_tuple;
+}
+
+element_group.fetch(Src) {
+ element_tuple = $Src;
+}
+
+element_group.execute(Fail, Index, Dst) {
+ Eterm element_index = $Index;
+ if (ERTS_LIKELY(is_small(element_index) && is_tuple(element_tuple))) {
+ Eterm* tp = tuple_val(element_tuple);
+
+ if ((signed_val(element_index) >= 1) &&
+ (signed_val(element_index) <= arityval(*tp))) {
+ $Dst = tp[signed_val(element_index)];
+ $NEXT0();
+ }
+ }
+ c_p->freason = BADARG;
+ $BIF_ERROR_ARITY_2($Fail, BIF_element_2, element_index, element_tuple);
+}
+
+i_fast_element := fast_element_group.fetch.execute;
+
+fast_element_group.head() {
+ Eterm fast_element_tuple;
+}
+
+fast_element_group.fetch(Src) {
+ fast_element_tuple = $Src;
+}
+
+fast_element_group.execute(Fail, Index, Dst) {
+ if (ERTS_LIKELY(is_tuple(fast_element_tuple))) {
+ Eterm* tp = tuple_val(fast_element_tuple);
+ Eterm pos = $Index; /* Untagged integer >= 1 */
+ if (pos <= arityval(*tp)) {
+ $Dst = tp[pos];
+ $NEXT0();
+ }
+ }
+ c_p->freason = BADARG;
+ $BIF_ERROR_ARITY_2($Fail, BIF_element_2, make_small($Index), fast_element_tuple);
+}
+
+init(Y) {
+ make_blank($Y);
+}
+
+init2(Y1, Y2) {
+ make_blank($Y1);
+ make_blank($Y2);
+}
+
+init3(Y1, Y2, Y3) {
+ make_blank($Y1);
+ make_blank($Y2);
+ make_blank($Y3);
+}
+
+i_make_fun(FunP, NumFree) {
+ HEAVY_SWAPOUT;
+ x(0) = new_fun(c_p, reg, (ErlFunEntry *) $FunP, $NumFree);
+ HEAVY_SWAPIN;
+}
+
+i_trim(Words) {
+ Uint cp = E[0];
+ E += $Words;
+ E[0] = cp;
+}
+
+move(Src, Dst) {
+ $Dst = $Src;
+}
+
+move3(S1, D1, S2, D2, S3, D3) {
+ $D1 = $S1;
+ $D2 = $S2;
+ $D3 = $S3;
+}
+
+move_dup(Src, D1, D2) {
+ $D1 = $D2 = $Src;
+}
+
+move2_par(S1, D1, S2, D2) {
+ Eterm V1, V2;
+ V1 = $S1;
+ V2 = $S2;
+ $D1 = V1;
+ $D2 = V2;
+}
+
+move_shift(Src, SD, D) {
+ Eterm V;
+ V = $Src;
+ $D = $SD;
+ $SD = V;
+}
+
+move_window3(S1, S2, S3, D) {
+ Eterm xt0, xt1, xt2;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+}
+
+move_window4(S1, S2, S3, S4, D) {
+ Eterm xt0, xt1, xt2, xt3;
+ Eterm* y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ xt3 = $S4;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+ y[3] = xt3;
+}
+
+move_window5(S1, S2, S3, S4, S5, D) {
+ Eterm xt0, xt1, xt2, xt3, xt4;
+ Eterm *y = &$D;
+ xt0 = $S1;
+ xt1 = $S2;
+ xt2 = $S3;
+ xt3 = $S4;
+ xt4 = $S5;
+ y[0] = xt0;
+ y[1] = xt1;
+ y[2] = xt2;
+ y[3] = xt3;
+ y[4] = xt4;
+}
+
+move_return(Src) {
+ //| -no_next
+ x(0) = $Src;
+ SET_I(c_p->cp);
+ c_p->cp = 0;
+ DispatchReturn;
+}
+
+move_x1(Src) {
+ x(1) = $Src;
+}
+
+move_x2(Src) {
+ x(2) = $Src;
+}
+
+node(Dst) {
+ $Dst = erts_this_node->sysname;
+}
+
+put_list(Hd, Tl, Dst) {
+ HTOP[0] = $Hd;
+ HTOP[1] = $Tl;
+ $Dst = make_list(HTOP);
+ HTOP += 2;
+}
+
+update_list(Hd, Dst) {
+ HTOP[0] = $Hd;
+ HTOP[1] = $Dst;
+ $Dst = make_list(HTOP);
+ HTOP += 2;
+}
+
+i_put_tuple := i_put_tuple.make.fill;
+
+i_put_tuple.make(Dst) {
+ $Dst = make_tuple(HTOP);
+}
+
+i_put_tuple.fill(Arity) {
+ Eterm* hp = HTOP;
+ Eterm arity = $Arity;
+
+ //| -no_next
+ *hp++ = make_arityval(arity);
+ I = $NEXT_INSTRUCTION;
+ do {
+ Eterm term = *I++;
+ switch (loader_tag(term)) {
+ case LOADER_X_REG:
+ *hp++ = x(loader_x_reg_index(term));
+ break;
+ case LOADER_Y_REG:
+ *hp++ = y(loader_y_reg_index(term));
+ break;
+ default:
+ *hp++ = term;
+ break;
+ }
+ } while (--arity != 0);
+ HTOP = hp;
+ ASSERT(VALID_INSTR(* (Eterm *)I));
+ Goto(*I);
+}
+
+self(Dst) {
+ $Dst = c_p->common.id;
+}
+
+set_tuple_element(Element, Tuple, Offset) {
+ Eterm* p;
+
+ ASSERT(is_tuple($Tuple));
+ p = (Eterm *) ((unsigned char *) tuple_val($Tuple) + $Offset);
+ *p = $Element;
+}
+
+swap(R1, R2) {
+ Eterm V = $R1;
+ $R1 = $R2;
+ $R2 = V;
+}
+
+swap_temp(R1, R2, Tmp) {
+ Eterm V = $R1;
+ $R1 = $R2;
+ $R2 = $Tmp = V;
+}
+
+test_heap(Nh, Live) {
+ $GC_TEST(0, $Nh, $Live);
+}
+
+test_heap_1_put_list(Nh, Reg) {
+ $test_heap($Nh, 1);
+ $put_list($Reg, x(0), x(0));
+}
+
+is_integer_allocate(Fail, Src, NeedStack, Live) {
+ //| -no_prefetch
+ $is_integer($Fail, $Src);
+ $AH($NeedStack, 0, $Live);
+}
+
+is_nonempty_list(Fail, Src) {
+ //| -no_prefetch
+ if (is_not_list($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_nonempty_list_test_heap(Fail, Need, Live) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, x(0));
+ $test_heap($Need, $Live);
+}
+
+is_nonempty_list_allocate(Fail, Src, Need, Live) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $AH($Need, 0, $Live);
+}
+
+is_nonempty_list_get_list(Fail, Src, Hd, Tl) {
+ //| -no_prefetch
+ $is_nonempty_list($Fail, $Src);
+ $get_list($Src, $Hd, $Tl);
+}
+
+jump(Fail) {
+ $JUMP($Fail);
+}
+
+move_jump(Fail, Src) {
+ x(0) = $Src;
+ $jump($Fail);
+}
+
+//
+// Test instructions.
+//
+
+is_atom(Fail, Src) {
+ if (is_not_atom($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_boolean(Fail, Src) {
+ if (($Src) != am_true && ($Src) != am_false) {
+ $FAIL($Fail);
+ }
+}
+
+is_binary(Fail, Src) {
+ if (is_not_binary($Src) || binary_bitsize($Src) != 0) {
+ $FAIL($Fail);
+ }
+}
+
+is_bitstring(Fail, Src) {
+ if (is_not_binary($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_float(Fail, Src) {
+ if (is_not_float($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_function(Fail, Src) {
+ if ( !(is_any_fun($Src)) ) {
+ $FAIL($Fail);
+ }
+}
+
+is_function2(Fail, Fun, Arity) {
+ if (erl_is_function(c_p, $Fun, $Arity) != am_true ) {
+ $FAIL($Fail);
+ }
+}
+
+is_integer(Fail, Src) {
+ if (is_not_integer($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_list(Fail, Src) {
+ if (is_not_list($Src) && is_not_nil($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_map(Fail, Src) {
+ if (is_not_map($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_nil(Fail, Src) {
+ if (is_not_nil($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_number(Fail, Src) {
+ if (is_not_integer($Src) && is_not_float($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_pid(Fail, Src) {
+ if (is_not_pid($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_port(Fail, Src) {
+ if (is_not_port($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_reference(Fail, Src) {
+ if (is_not_ref($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tagged_tuple(Fail, Src, Arityval, Tag) {
+ Eterm term = $Src;
+ if (!(BEAM_IS_TUPLE(term) &&
+ (tuple_val(term))[0] == $Arityval &&
+ (tuple_val(term))[1] == $Tag)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tuple(Fail, Src) {
+ if (is_not_tuple($Src)) {
+ $FAIL($Fail);
+ }
+}
+
+is_tuple_of_arity(Fail, Src, Arityval) {
+ Eterm term = $Src;
+ if (!(BEAM_IS_TUPLE(term) && *tuple_val(term) == $Arityval)) {
+ $FAIL($Fail);
+ }
+}
+
+test_arity(Fail, Pointer, Arity) {
+ if (*tuple_val($Pointer) != $Arity) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_eq_exact_immed(Fail, X, Y) {
+ if ($X != $Y) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_ne_exact_immed(Fail, X, Y) {
+ if ($X == $Y) {
+ $FAIL($Fail);
+ }
+}
+
+is_eq_exact(Fail, X, Y) {
+ if (!EQ($X, $Y)) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_eq_exact_literal(Fail, Src, Literal) {
+ Eterm src = $Src;
+ if (is_immed(src) || !eq(src, $Literal)) {
+ $FAIL($Fail);
+ }
+}
+
+is_ne_exact(Fail, X, Y) {
+ if (EQ($X, $Y)) {
+ $FAIL($Fail);
+ }
+}
+
+i_is_ne_exact_literal(Fail, Src, Literal) {
+ Eterm src = $Src;
+ if (!is_immed(src) && eq(src, $Literal)) {
+ $FAIL($Fail);
+ }
+}
+
+is_eq(Fail, X, Y) {
+ CMP_EQ_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_ne(Fail, X, Y) {
+ CMP_NE_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_lt(Fail, X, Y) {
+ CMP_LT_ACTION($X, $Y, $FAIL($Fail));
+}
+
+is_ge(Fail, X, Y) {
+ CMP_GE_ACTION($X, $Y, $FAIL($Fail));
+}
+
+badarg(Fail) {
+ $BADARG($Fail);
+ //| -no_next;
+}
+
+badmatch(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = BADMATCH;
+ goto find_func_info;
+ //| -no_next;
+}
+
+case_end(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = EXC_CASE_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+if_end() {
+ c_p->freason = EXC_IF_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+system_limit(Fail) {
+ $SYSTEM_LIMIT($Fail);
+ //| -no_next;
+}
+
+catch(Y, Fail) {
+ c_p->catches++;
+ $Y = $Fail;
+}
+
+catch_end(Y) {
+ $try_end($Y);
+ if (is_non_value(r(0))) {
+ c_p->fvalue = NIL;
+ if (x(1) == am_throw) {
+ r(0) = x(2);
+ } else {
+ if (x(1) == am_error) {
+ SWAPOUT;
+ x(2) = add_stacktrace(c_p, x(2), x(3));
+ SWAPIN;
+ }
+ /* only x(2) is included in the rootset here */
+ if (E - HTOP < 3) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ r(0) = TUPLE2(HTOP, am_EXIT, x(2));
+ HTOP += 3;
+ }
+ }
+ CHECK_TERM(r(0));
+}
+
+try_end(Y) {
+ c_p->catches--;
+ make_blank($Y);
+}
+
+try_case(Y) {
+ $try_end($Y);
+ ASSERT(is_non_value(r(0)));
+ c_p->fvalue = NIL;
+ r(0) = x(1);
+ x(1) = x(2);
+ x(2) = x(3);
+}
+
+try_case_end(Src) {
+ c_p->fvalue = $Src;
+ c_p->freason = EXC_TRY_CLAUSE;
+ goto find_func_info;
+ //| -no_next;
+}
+
+i_raise() {
+ Eterm raise_trace = x(2);
+ Eterm raise_value = x(1);
+ struct StackTrace *s;
+
+ c_p->fvalue = raise_value;
+ c_p->ftrace = raise_trace;
+ s = get_trace_from_exc(raise_trace);
+ if (s == NULL) {
+ c_p->freason = EXC_ERROR;
+ } else {
+ c_p->freason = PRIMARY_EXCEPTION(s->freason);
+ }
+ goto find_func_info;
+ //| -no_next
+}
+
+build_stacktrace() {
+ SWAPOUT;
+ x(0) = build_stacktrace(c_p, x(0));
+ SWAPIN;
+}
+
+raw_raise() {
+ Eterm class = x(0);
+ Eterm value = x(1);
+ Eterm stacktrace = x(2);
+
+ if (class == am_error) {
+ c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
+ c_p->fvalue = value;
+ c_p->ftrace = stacktrace;
+ goto find_func_info;
+ } else if (class == am_exit) {
+ c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
+ c_p->fvalue = value;
+ c_p->ftrace = stacktrace;
+ goto find_func_info;
+ } else if (class == am_throw) {
+ c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
+ c_p->fvalue = value;
+ c_p->ftrace = stacktrace;
+ goto find_func_info;
+ } else {
+ x(0) = am_badarg;
+ }
+}
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 77dbe92241..5325480901 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -52,6 +52,8 @@
#include "erl_bif_unique.h"
#include "erl_hl_timer.h"
#include "erl_time.h"
+#include "erl_io_queue.h"
+#include "erl_proc_sig_queue.h"
extern ErlDrvEntry fd_driver_entry;
extern ErlDrvEntry vanilla_driver_entry;
@@ -59,13 +61,13 @@ extern ErlDrvEntry spawn_driver_entry;
#ifndef __WIN32__
extern ErlDrvEntry forker_driver_entry;
#endif
-extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
+extern ErtsStaticDriver driver_tab[]; /* table of static drivers, only used during initialization */
erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */
-erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
-static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
+erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
+static erts_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
driver init */
-static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
+static erts_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
per thread basis (for BC interfaces) */
ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */
@@ -93,22 +95,16 @@ static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof);
-#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p);
static void driver_monitor_unlock_pdl(Port *p);
#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 1)
#define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port)
#define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port)
-#else
-#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 0)
-#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */
-#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */
-#endif
#define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT)
#define SMALL_WRITE_VEC 16
-static ERTS_INLINE ErlIOQueue*
+static ERTS_INLINE ErlPortIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
Port *prt = erts_thr_drvport2port(drvport, 0);
@@ -121,13 +117,13 @@ static ERTS_INLINE int
is_port_ioq_empty(Port *pp)
{
int res;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = (pp->ioq.size == 0);
+ res = (erts_ioq_size(&pp->ioq) == 0);
erts_mtx_unlock(&pdl->mtx);
}
return res;
@@ -142,14 +138,14 @@ erts_is_port_ioq_empty(Port *pp)
Uint
erts_port_ioq_size(Port *pp)
{
- int res;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ErlDrvSizeT res;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
if (!pp->port_data_lock)
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
else {
ErlDrvPDL pdl = pp->port_data_lock;
erts_mtx_lock(&pdl->mtx);
- res = pp->ioq.size;
+ res = erts_ioq_size(&pp->ioq);
erts_mtx_unlock(&pdl->mtx);
}
return (Uint) res;
@@ -211,14 +207,13 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
static ERTS_INLINE void
kill_port(Port *pp)
{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
ERTS_TRACER_CLEAR(&ERTS_TRACER(pp));
erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
/* In non-smp case the port structure may have been deallocated now */
}
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int
@@ -226,12 +221,11 @@ erts_lc_is_port_locked(Port *prt)
{
if (!prt)
return 0;
- ERTS_SMP_LC_ASSERT(prt->lock);
- return erts_smp_lc_mtx_is_locked(prt->lock);
+ ERTS_LC_ASSERT(prt->lock);
+ return erts_lc_mtx_is_locked(prt->lock);
}
#endif
-#endif /* #ifdef ERTS_SMP */
static void initq(Port* prt);
@@ -255,32 +249,21 @@ static ERTS_INLINE void port_init_instr(Port *prt
     * Stuff that needs to be initialized with the port id
* in the instrumented case, but not in the normal case.
*/
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
- char *lock_str = "port_lock";
- erts_mtx_init_locked_x(prt->lock, lock_str, id,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
-#else
- 0
-#endif
- );
+ erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
erts_port_task_init_sched(&prt->sched, id);
}
#if !ERTS_PORT_INIT_INSTR_NEED_ID
static ERTS_INLINE void port_init_instr_abort(Port *prt)
{
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
erts_mtx_unlock(prt->lock);
erts_mtx_destroy(prt->lock);
}
-#endif
erts_port_task_fini_sched(&prt->sched);
}
#endif
@@ -316,15 +299,12 @@ static Port *create_port(char *name,
erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
erts_aint32_t x_pts_flgs = 0;
-#ifdef ERTS_SMP
- ErtsRunQueue *runq;
if (!driver_lock) {
/* Align size for mutex following port struct */
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
size += sizeof(erts_mtx_t);
}
else
-#endif
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
#ifdef DEBUG
@@ -358,7 +338,6 @@ static Port *create_port(char *name,
p += busy_port_queue_size;
}
-#ifdef ERTS_SMP
if (driver_lock) {
prt->lock = driver_lock;
erts_mtx_lock(driver_lock);
@@ -368,17 +347,18 @@ static Port *create_port(char *name,
p += sizeof(erts_mtx_t);
state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
}
- if (erts_get_scheduler_data())
- runq = erts_get_runq_current(NULL);
- else
- runq = ERTS_RUNQ_IX(0);
- erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
+
+ {
+ ErtsRunQueue *runq;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ runq = erts_get_runq_current(esdp);
+ else
+ runq = ERTS_RUNQ_IX(0);
+ erts_init_runq_port(prt, runq);
+ }
prt->xports = NULL;
-#else
- erts_atomic32_init_nob(&prt->refc, 1);
- prt->cleanup = 0;
-#endif
erts_port_task_pre_init_sched(&prt->sched, busy_port_queue);
@@ -387,6 +367,7 @@ static Port *create_port(char *name,
prt->drv_ptr = driver;
ERTS_P_LINKS(prt) = NULL;
ERTS_P_MONITORS(prt) = NULL;
+ ERTS_P_LT_MONITORS(prt) = NULL;
prt->linebuf = NULL;
prt->suspended = NULL;
erts_init_port_data(prt);
@@ -394,12 +375,11 @@ static Port *create_port(char *name,
prt->control_flags = 0;
prt->bytes_in = 0;
prt->bytes_out = 0;
- prt->dist_entry = NULL;
ERTS_PORT_INIT_CONNECTED(prt, pid);
prt->common.u.alive.reg = NULL;
ERTS_PTMR_INIT(prt);
erts_port_task_handle_init(&prt->timeout_task);
- erts_smp_atomic_init_nob(&prt->psd, (erts_aint_t) NULL);
+ erts_atomic_init_nob(&prt->psd, (erts_aint_t) NULL);
prt->async_open_port = NULL;
prt->drv_data = (SWord) 0;
prt->os_pid = -1;
@@ -426,10 +406,8 @@ static Port *create_port(char *name,
#if !ERTS_PORT_INIT_INSTR_NEED_ID
port_init_instr_abort(prt);
#endif
-#ifdef ERTS_SMP
if (driver_lock)
erts_mtx_unlock(driver_lock);
-#endif
if (enop)
*enop = 0;
erts_free(ERTS_ALC_T_PORT, prt);
@@ -442,7 +420,7 @@ static Port *create_port(char *name,
initq(prt);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (erts_port_schedule_all_ops)
x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED;
@@ -451,29 +429,17 @@ static Port *create_port(char *name,
x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM;
if (x_pts_flgs)
- erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
+ erts_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
erts_atomic32_set_relb(&prt->state, state);
return prt;
}
-#ifndef ERTS_SMP
-void
-erts_port_cleanup(Port *prt)
-{
- if (prt->drv_ptr && prt->drv_ptr->handle)
- erts_ddll_dereference_driver(prt->drv_ptr->handle);
- prt->drv_ptr = NULL;
- erts_port_dec_refc(prt);
-}
-#endif
void
erts_port_free(Port *prt)
{
-#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
-#endif
ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING
| ERTS_PORT_SFLG_FREE));
ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
@@ -487,7 +453,6 @@ erts_port_free(Port *prt)
prt->async_open_port = NULL;
}
-#ifdef ERTS_SMP
ASSERT(prt->lock);
if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
erts_mtx_destroy(prt->lock);
@@ -504,7 +469,6 @@ erts_port_free(Port *prt)
*/
if (prt->drv_ptr->handle)
erts_ddll_dereference_driver(prt->drv_ptr->handle);
-#endif
erts_free(ERTS_ALC_T_PORT, prt);
}
@@ -515,41 +479,17 @@ erts_port_free(Port *prt)
*/
static void initq(Port* prt)
{
- ErlIOQueue* q = &prt->ioq;
-
ERTS_LC_ASSERT(!prt->port_data_lock);
-
- q->size = 0;
- q->v_head = q->v_tail = q->v_start = q->v_small;
- q->v_end = q->v_small + SMALL_IO_QUEUE;
- q->b_head = q->b_tail = q->b_start = q->b_small;
- q->b_end = q->b_small + SMALL_IO_QUEUE;
+ erts_ioq_init(&prt->ioq, ERTS_ALC_T_IOQ, 1);
}
static void stopq(Port* prt)
{
- ErlIOQueue* q;
- ErlDrvBinary** binp;
if (prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
- q = &prt->ioq;
- binp = q->b_head;
-
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
-
- while(binp < q->b_tail) {
- if (*binp != NULL)
- driver_free_binary(*binp);
- binp++;
- }
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
- q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
- q->size = 0;
+ erts_ioq_clear(&prt->ioq);
if (prt->port_data_lock) {
driver_pdl_unlock(prt->port_data_lock);
@@ -563,7 +503,7 @@ erts_save_suspend_process_on_port(Port *prt, Process *process)
int saved;
erts_aint32_t flags;
erts_port_task_sched_lock(&prt->sched);
- flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ flags = erts_atomic32_read_nob(&prt->sched.flags);
saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT);
if (saved)
erts_proclist_store_last(&prt->suspended, erts_proclist_create(process));
@@ -607,16 +547,16 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
erts_mtx_t *driver_lock = NULL;
int cprt_flgs = 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
if (!driver) {
for (driver = driver_list; driver; driver = driver->next) {
if (sys_strcmp(driver->name, name) == 0)
break;
}
if (!driver) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
}
@@ -649,7 +589,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
*/
for (d = driver_list; d; d = d->next) {
- if (strcmp(d->name, name) == 0 &&
+ if (sys_strcmp(d->name, name) == 0 &&
erts_ddll_driver_ok(d->handle)) {
driver = d;
break;
@@ -661,19 +601,17 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
if (driver->handle != NULL) {
erts_ddll_increment_port_count(driver->handle);
erts_ddll_reference_driver(driver->handle);
}
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
/*
* We'll set up the port before calling the start function,
@@ -686,9 +624,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno);
if (!port) {
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
erts_ddll_dereference_driver(driver->handle);
}
if (port_errno)
@@ -701,7 +639,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
trace_port_open(port,
pid,
erts_atom_put((byte *) port->name,
- strlen(port->name),
+ sys_strlen(port->name),
ERTS_ATOM_ENC_LATIN1,
1));
}
@@ -756,11 +694,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_open);
}
-#ifdef ERTS_SMP
if (port->xports)
erts_port_handle_xports(port);
ASSERT(!port->xports);
-#endif
}
if (error_type) {
@@ -775,9 +711,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
port->linebuf = NULL;
}
if (driver->handle != NULL) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
kill_port(port);
erts_port_release(port);
@@ -789,7 +725,6 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
#undef ERTS_OPEN_DRIVER_RET
}
-#ifdef ERTS_SMP
struct ErtsXPortsList_ {
ErtsXPortsList *next;
@@ -798,7 +733,6 @@ struct ErtsXPortsList_ {
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST)
-#endif
/*
* Driver function to create new instances of a driver
@@ -815,10 +749,10 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
Port *creator_port;
Port* port;
erts_driver_t *driver;
- Process *rp;
erts_mtx_t *driver_lock = NULL;
+ ErtsLinkData *ldp;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
/* Need to be called from a scheduler thread */
if (!erts_get_scheduler_id())
@@ -828,17 +762,13 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
if (creator_port == ERTS_INVALID_ERL_DRV_PORT)
return ERTS_INVALID_ERL_DRV_PORT;
- rp = erts_proc_lookup(pid);
- if (!rp)
- return ERTS_INVALID_ERL_DRV_PORT;
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(creator_port));
driver = creator_port->drv_ptr;
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
if (!erts_ddll_driver_ok(driver->handle)) {
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
- return ERTS_INVALID_ERL_DRV_PORT;
+ erts_rwmtx_runlock(&erts_driver_list_lock);
+ return ERTS_INVALID_ERL_DRV_PORT;
}
if (driver->handle != NULL) {
@@ -846,60 +776,57 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
erts_ddll_reference_referenced_driver(driver->handle);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
/* Inherit parallelism flag from parent */
if (ERTS_PTS_FLG_PARALLELISM &
- erts_smp_atomic32_read_nob(&creator_port->sched.flags))
+ erts_atomic32_read_nob(&creator_port->sched.flags))
cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL);
if (!port) {
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
erts_ddll_dereference_driver(driver->handle);
}
return ERTS_INVALID_ERL_DRV_PORT;
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PROC_IS_EXITING(rp)) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ ldp = erts_link_create(ERTS_LNK_TYPE_PORT,
+ port->common.id, pid);
+ ASSERT(ldp->a.other.item == pid);
+ ASSERT(ldp->b.other.item == port->common.id);
+ erts_link_tree_insert(&ERTS_P_LINKS(port), &ldp->a);
+
+ if (!erts_proc_sig_send_link(NULL, pid, &ldp->b)) {
+ erts_link_tree_delete(&ERTS_P_LINKS(port), &ldp->a);
+ erts_link_release_both(ldp);
if (driver->handle) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
kill_port(port);
erts_port_release(port);
return ERTS_INVALID_ERL_DRV_PORT;
}
- erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid);
- erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-
-#ifdef ERTS_SMP
if (!driver_lock) {
ErtsXPortsList *xplp = xports_list_alloc();
xplp->port = port;
xplp->next = creator_port->xports;
creator_port->xports = xplp;
}
-#endif
port->drv_data = (UWord) drv_data;
return ERTS_Port2ErlDrvPort(port);
}
-#ifdef ERTS_SMP
int erts_port_handle_xports(Port *prt)
{
int reds = 0;
@@ -928,312 +855,6 @@ int erts_port_handle_xports(Port *prt)
prt->xports = NULL;
return reds;
}
-#endif
-
-/* Fills a possibly deep list of chars and binaries into vec
-** Small characters are first stored in the buffer buf of length ln
-** binaries found are copied and linked into msoh
-** Return vector length on succsess,
-** -1 on overflow
-** -2 on type error
-*/
-
-#ifdef DEBUG
-#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
-#else
-#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
-#endif
-
-static ERTS_INLINE void
-io_list_to_vec_set_vec(SysIOVec **iov, ErlDrvBinary ***binv,
- ErlDrvBinary *bin, byte *ptr, Uint len,
- int *vlen)
-{
- while (len > MAX_SYSIOVEC_IOVLEN) {
- (*iov)->iov_base = ptr;
- (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
- ptr += MAX_SYSIOVEC_IOVLEN;
- len -= MAX_SYSIOVEC_IOVLEN;
- (*iov)++;
- (*vlen)++;
- *(*binv)++ = bin;
- }
- (*iov)->iov_base = ptr;
- (*iov)->iov_len = len;
- *(*binv)++ = bin;
- (*iov)++;
- (*vlen)++;
-}
-
-static int
-io_list_to_vec(Eterm obj, /* io-list */
- SysIOVec* iov, /* io vector */
- ErlDrvBinary** binv, /* binary reference vector */
- ErlDrvBinary* cbin, /* binary to store characters */
- ErlDrvSizeT bin_limit) /* small binaries limit */
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- byte *buf = (byte*)cbin->orig_bytes;
- Uint len = cbin->orig_size;
- Uint csize = 0;
- int vlen = 0;
- byte* cptr = buf;
-
- goto L_jump_start; /* avoid push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
- if (is_byte(obj)) {
- if (len == 0)
- goto L_overflow;
- *buf++ = unsigned_val(obj);
- csize++;
- len--;
- } else if (is_binary(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto handle_binary;
- } else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) {
- goto handle_binary;
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- } else if (is_binary(obj)) {
- Eterm real_bin;
- Uint offset;
- Eterm* bptr;
- ErlDrvSizeT size;
- int bitoffs;
- int bitsize;
-
- handle_binary:
- size = binary_size(obj);
- ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
- ASSERT(bitsize == 0);
- bptr = binary_val(real_bin);
- if (*bptr == HEADER_PROC_BIN) {
- ProcBin* pb = (ProcBin *) bptr;
- if (bitoffs != 0) {
- if (len < size) {
- goto L_overflow;
- }
- erts_copy_bits(pb->bytes+offset, bitoffs, 1,
- (byte *) buf, 0, 1, size*8);
- csize += size;
- buf += size;
- len -= size;
- } else if (bin_limit && size < bin_limit) {
- if (len < size) {
- goto L_overflow;
- }
- sys_memcpy(buf, pb->bytes+offset, size);
- csize += size;
- buf += size;
- len -= size;
- } else {
- if (csize != 0) {
- io_list_to_vec_set_vec(&iov, &binv, cbin,
- cptr, csize, &vlen);
- cptr = buf;
- csize = 0;
- }
- if (pb->flags) {
- erts_emasculate_writable_binary(pb);
- }
- io_list_to_vec_set_vec(
- &iov, &binv, Binary2ErlDrvBinary(pb->val),
- pb->bytes+offset, size, &vlen);
- }
- } else {
- ErlHeapBin* hb = (ErlHeapBin *) bptr;
- if (len < size) {
- goto L_overflow;
- }
- copy_binary_to_buffer(buf, 0,
- ((byte *) hb->data)+offset, bitoffs,
- 8*size);
- csize += size;
- buf += size;
- len -= size;
- }
- } else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- if (csize != 0) {
- io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
- }
-
- DESTROY_ESTACK(s);
- return vlen;
-
- L_type_error:
- DESTROY_ESTACK(s);
- return -2;
-
- L_overflow:
- DESTROY_ESTACK(s);
- return -1;
-}
-
-#define IO_LIST_VEC_COUNT(obj) \
-do { \
- Uint _size = binary_size(obj); \
- Eterm _real; \
- ERTS_DECLARE_DUMMY(Uint _offset); \
- int _bitoffs; \
- int _bitsize; \
- ERTS_GET_REAL_BIN(obj, _real, _offset, _bitoffs, _bitsize); \
- if (_bitsize != 0) goto L_type_error; \
- if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
- _bitoffs == 0) { \
- b_size += _size; \
- if (b_size < _size) goto L_overflow_error; \
- in_clist = 0; \
- v_size++; \
- /* If iov_len is smaller then Uint we split the binary into*/ \
- /* multiple smaller (2GB) elements in the iolist.*/ \
- v_size += _size / MAX_SYSIOVEC_IOVLEN; \
- if (_size >= ERL_SMALL_IO_BIN_LIMIT) { \
- p_in_clist = 0; \
- p_v_size++; \
- } else { \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
- } else { \
- c_size += _size; \
- if (c_size < _size) goto L_overflow_error; \
- if (!in_clist) { \
- in_clist = 1; \
- v_size++; \
- } \
- p_c_size += _size; \
- if (!p_in_clist) { \
- p_in_clist = 1; \
- p_v_size++; \
- } \
- } \
-} while (0)
-
-
-/*
- * Returns 0 if successful and a non-zero value otherwise.
- *
- * Return values through pointers:
- * *vsize - SysIOVec size needed for a writev
- * *csize - Number of bytes not in binary (in the common binary)
- * *pvsize - SysIOVec size needed if packing small binaries
- * *pcsize - Number of bytes in the common binary if packing
- * *total_size - Total size of iolist in bytes
- */
-
-static int
-io_list_vec_len(Eterm obj, int* vsize, Uint* csize,
- Uint* pvsize, Uint* pcsize,
- ErlDrvSizeT* total_size)
-{
- DECLARE_ESTACK(s);
- Eterm* objp;
- Uint v_size = 0;
- Uint c_size = 0;
- Uint b_size = 0;
- Uint in_clist = 0;
- Uint p_v_size = 0;
- Uint p_c_size = 0;
- Uint p_in_clist = 0;
- Uint total;
-
- goto L_jump_start; /* avoid a push */
-
- while (!ESTACK_ISEMPTY(s)) {
- obj = ESTACK_POP(s);
- L_jump_start:
- if (is_list(obj)) {
- L_iter_list:
- objp = list_val(obj);
- obj = CAR(objp);
-
- if (is_byte(obj)) {
- c_size++;
- if (c_size == 0) {
- goto L_overflow_error;
- }
- if (!in_clist) {
- in_clist = 1;
- v_size++;
- }
- p_c_size++;
- if (!p_in_clist) {
- p_in_clist = 1;
- p_v_size++;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (is_list(obj)) {
- ESTACK_PUSH(s, CDR(objp));
- goto L_iter_list; /* on head */
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
-
- obj = CDR(objp);
- if (is_list(obj))
- goto L_iter_list; /* on tail */
- else if (is_binary(obj)) { /* binary tail is OK */
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
- else if (is_binary(obj)) {
- IO_LIST_VEC_COUNT(obj);
- }
- else if (!is_nil(obj)) {
- goto L_type_error;
- }
- }
-
- total = c_size + b_size;
- if (total < c_size) {
- goto L_overflow_error;
- }
- *total_size = (ErlDrvSizeT) total;
-
- DESTROY_ESTACK(s);
- *vsize = v_size;
- *csize = c_size;
- *pvsize = p_v_size;
- *pcsize = p_c_size;
- return 0;
-
- L_type_error:
- L_overflow_error:
- DESTROY_ESTACK(s);
- return 1;
-}
typedef enum {
ERTS_TRY_IMM_DRV_CALL_OK,
@@ -1280,12 +901,12 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM;
if (sp->pre_chk_sched_flags) {
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sp->sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sp->sched_flags & invalid_sched_flags)
return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
}
- if (erts_smp_port_trylock(prt) == EBUSY)
+ if (erts_port_trylock(prt) == EBUSY)
return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK;
invalid_state = sp->state;
@@ -1299,7 +920,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (prof_runnable_ports)
erts_port_task_sched_lock(&prt->sched);
- act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ act = erts_atomic32_read_nob(&prt->sched.flags);
do {
erts_aint32_t new;
@@ -1311,7 +932,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
}
exp = act;
new = act | ERTS_PTS_FLG_EXEC_IMM;
- act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ act = erts_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
} while (act != exp);
sp->sched_flags = act;
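
The do/while just above is the standard compare-and-swap retry loop: recompute the desired flag word from the last observed value and retry until no other thread has raced the update, bailing out first if a conflicting flag shows up. A self-contained C11 rendering of the idiom (the FLG_* names are invented for the sketch; erts_atomic32_cmpxchg_mb plays the same read-modify-write role):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define FLG_EXEC_IMM (1u << 0)
    #define FLG_INVALID  (1u << 1)  /* stand-in for invalid_sched_flags */

    /* Try to set FLG_EXEC_IMM unless a conflicting flag is present. */
    static bool try_set_exec_imm(atomic_uint *flags)
    {
        unsigned exp = atomic_load(flags);
        for (;;) {
            if (exp & FLG_INVALID)
                return false;            /* like the early bail-out */
            if (atomic_compare_exchange_weak(flags, &exp,
                                             exp | FLG_EXEC_IMM))
                return true;             /* we won the race */
            /* exp was refreshed by the failed CAS; just retry. */
        }
    }
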
@@ -1333,14 +954,17 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
profile_runnable_proc(c_p, am_inactive);
reds_left_in = ERTS_BIF_REDS_LEFT(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT((c_p->scheduler_data)->current_port == NULL);
+ (c_p->scheduler_data)->current_port = prt;
}
ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS);
sp->reds_left_in = reds_left_in;
prt->reds = CONTEXT_REDS - reds_left_in;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
@@ -1378,9 +1002,9 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (prof_runnable_ports)
erts_port_task_sched_lock(&prt->sched);
- act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ act = erts_atomic32_read_band_mb(&prt->sched.flags,
~ERTS_PTS_FLG_EXEC_IMM);
- ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
@@ -1395,7 +1019,10 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_port_release(prt);
if (c_p) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ ASSERT((c_p->scheduler_data)->current_port == prt);
+ (c_p->scheduler_data)->current_port = NULL;
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (reds != (CONTEXT_REDS - sp->reds_left_in)) {
int bump_reds = reds - (CONTEXT_REDS - sp->reds_left_in);
@@ -1447,7 +1074,7 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
erts_unblock_fpe(sp->fpe_was_unmasked);
}
-#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (ERTS_REF_THING_SIZE + 3)
static ERTS_INLINE void
queue_port_sched_op_reply(Process *rp,
@@ -1462,7 +1089,7 @@ queue_port_sched_op_reply(Process *rp,
ref= make_internal_ref(hp);
write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
msg = TUPLE2(hp, ref, msg);
@@ -1506,12 +1133,12 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt)
prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
-ErtsPortOpResult
+static ErtsPortOpResult
erts_schedule_proc2port_signal(Process *c_p,
Port *prt,
Eterm caller,
@@ -1524,7 +1151,7 @@ erts_schedule_proc2port_signal(Process *c_p,
int sched_res;
if (!refp) {
if (c_p)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
}
else {
ASSERT(c_p);
@@ -1545,21 +1172,10 @@ erts_schedule_proc2port_signal(Process *c_p,
* otherwise, next receive will *not* work
* as expected!
*/
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-
- if (ERTS_PROC_PENDING_EXIT(c_p)) {
- /* need to exit caller instead */
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- KILL_CATCHES(c_p);
- c_p->freason = EXC_EXIT;
- return ERTS_PORT_OP_CALLER_EXIT;
- }
-
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- c_p->msg.save = c_p->msg.last;
- erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE
- | ERTS_PROC_LOCK_MAIN));
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
}
@@ -1574,7 +1190,7 @@ erts_schedule_proc2port_signal(Process *c_p,
task_flags);
if (c_p)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (sched_res != 0) {
if (refp) {
@@ -1585,9 +1201,9 @@ erts_schedule_proc2port_signal(Process *c_p,
* containing the reference created above...
*/
ASSERT(c_p);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
JOIN_MESSAGE(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
*refp = NIL;
}
return ERTS_PORT_OP_DROPPED;
@@ -1617,29 +1233,12 @@ erts_schedule_port2port_signal(Eterm port_num, ErtsProc2PortSigData *sigdp,
static ERTS_INLINE void
send_badsig(Port *prt) {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process* rp;
Eterm connected = ERTS_PORT_GET_CONNECTED(prt);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
ERTS_LC_ASSERT(erts_get_scheduler_id());
-
ASSERT(is_internal_pid(connected));
-
- rp = erts_proc_lookup_raw(connected);
- if (rp) {
- erts_smp_proc_lock(rp, rp_locks);
- if (!ERTS_PROC_IS_EXITING(rp))
- (void) erts_send_exit_signal(NULL,
- prt->common.id,
- rp,
- &rp_locks,
- am_badsig,
- NIL,
- NULL,
- 0);
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
- } /* exit sent */
+ erts_proc_sig_send_exit(NULL, prt->common.id, connected,
+ am_badsig, NIL, 0);
} /* send_badsig */
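
send_badsig now hands the exit to erts_proc_sig_send_exit instead of looking the process up, taking its locks, and calling erts_send_exit_signal itself. A toy sketch of the shift, with invented types: the sender enqueues a signal record for the receiver to act on rather than taking the receiver's locks (the real in-queue push is properly synchronized; a plain list keeps the sketch short):

    #include <stddef.h>

    typedef struct sig { struct sig *next; int kind; } Sig;   /* toy record */
    typedef struct { Sig *head; } SigInQ;

    static void sig_enqueue(SigInQ *q, Sig *s) {
        s->next = q->head;   /* atomic push in ERTS; plain here */
        q->head = s;
    }
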
static void
@@ -1761,7 +1360,7 @@ call_driver_outputv(int bang_op,
ErlDrvSizeT size = evp->size;
ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
@@ -1804,8 +1403,7 @@ cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp)
int i;
/* Need to free all binaries */
for (i = 1; i < ev->vsize; i++)
- if (ev->binv[i])
- driver_free_binary(ev->binv[i]);
+ driver_free_binary(ev->binv[i]);
if (cbinp)
driver_free_binary(cbinp);
}
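
The per-element guard before driver_free_binary is dropped, which reads as relying on the callee tolerating NULL, the same contract free(3) has (driver_free_binary appears to return early on a NULL argument). A toy version of a NULL-tolerant release:

    #include <stdlib.h>

    typedef struct { int refc; void *data; } ToyBinary;   /* invented type */

    static void toy_free_binary(ToyBinary *b) {
        if (b == NULL)            /* NULL-tolerant, like free(3) */
            return;
        if (--b->refc == 0) {
            free(b->data);
            free(b);
        }
    }
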
@@ -1819,7 +1417,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
case ERTS_PROC2PORT_SIG_EXEC:
/* Execution of a scheduled outputv() call */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
reply = am_badarg;
@@ -1875,7 +1473,7 @@ call_driver_output(int bang_op,
else {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
#ifdef USE_VM_PROBES
@@ -1926,7 +1524,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
case ERTS_PROC2PORT_SIG_EXEC:
/* Execution of a scheduled output() call */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
reply = am_badarg;
@@ -1968,21 +1566,19 @@ int
erts_port_output_async(Port *prt, Eterm from, Eterm list)
{
- ErtsPortOpResult res;
ErtsProc2PortSigData *sigdp;
erts_driver_t *drv = prt->drv_ptr;
size_t size;
int task_flags;
ErtsProc2PortSigCallback port_sig_callback;
- ErlDrvBinary *cbin = NULL;
- ErlIOVec *evp = NULL;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
char *buf = NULL;
ErtsPortTaskHandle *ns_pthp;
if (drv->outputv) {
- ErlIOVec ev;
SysIOVec* ivp;
- ErlDrvBinary** bvp;
+ ErtsIOQBinary** bvp;
int vsize;
Uint csize;
Uint pvsize;
@@ -1992,91 +1588,63 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
char *ptr;
int i;
- Eterm* bptr = NULL;
- Uint offset;
-
- if (is_binary(list)) {
- /* We optimize for when we get a procbin without offset */
- Eterm real_bin;
- int bitoffs;
- int bitsize;
- ERTS_GET_REAL_BIN(list, real_bin, offset, bitoffs, bitsize);
- bptr = binary_val(real_bin);
- if (*bptr == HEADER_PROC_BIN && bitoffs == 0) {
- size = binary_size(list);
- vsize = 1;
- } else
- bptr = NULL;
- }
-
- if (!bptr) {
- if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
- goto bad_value;
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
+ goto bad_value;
- /* To pack or not to pack (small binaries) ...? */
- if (vsize >= SMALL_WRITE_VEC) {
- /* Do pack */
- vsize = pvsize + 1;
- csize = pcsize;
- blimit = ERL_SMALL_IO_BIN_LIMIT;
- }
- cbin = driver_alloc_binary(csize);
+ /* To pack or not to pack (small binaries) ...? */
+ if (vsize >= SMALL_WRITE_VEC) {
+ /* Do pack */
+ vsize = pvsize + 1;
+ csize = pcsize;
+ blimit = ERL_SMALL_IO_BIN_LIMIT;
+ }
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
if (!cbin)
erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
}
-
iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
binv_offset = iov_offset;
binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
alloc_size = binv_offset;
- alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
sigdp = erts_port_task_alloc_p2p_sig_data_extra(alloc_size, (void**)&ptr);
- evp = (ErlIOVec *) ptr;
- ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
- bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- if (bptr) {
- ProcBin* pb = (ProcBin *) bptr;
-
- ivp[1].iov_base = pb->bytes+offset;
- ivp[1].iov_len = size;
- bvp[1] = Binary2ErlDrvBinary(pb->val);
-
- evp->vsize = 1;
- } else {
-
- evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (evp->vsize < 0) {
- if (evp != &ev)
- erts_free(ERTS_ALC_T_DRV_CMD_DATA, evp);
- driver_free_binary(cbin);
- goto bad_value;
- }
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1, cbin,
+ blimit, 1);
+ if (evp->driver.vsize < 0) {
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, evp);
+ driver_free_binary(&cbin->driver);
+ goto bad_value;
}
#if 0
/* This assertion may say something useful, but it can
be falsified during the emulator test suites. */
ASSERT(evp->vsize == vsize);
#endif
- evp->vsize++;
- evp->size = size; /* total size */
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
+ for (i = 1; i < evp->driver.vsize; i++)
if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ driver_binary_inc_refc(&bvp[i]->driver);
sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
sigdp->u.outputv.from = from;
- sigdp->u.outputv.evp = evp;
- sigdp->u.outputv.cbinp = cbin;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
port_sig_callback = port_sig_outputv;
} else {
ErlDrvSizeT ERTS_DECLARE_DUMMY(r);
@@ -2102,26 +1670,18 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
sigdp->u.output.size = size;
port_sig_callback = port_sig_output;
}
- sigdp->flags = 0;
ns_pthp = NULL;
task_flags = 0;
- res = erts_schedule_proc2port_signal(NULL,
- prt,
- ERTS_INVALID_PID,
- NULL,
- sigdp,
- task_flags,
- ns_pthp,
- port_sig_callback);
+ erts_schedule_proc2port_signal(NULL,
+ prt,
+ ERTS_INVALID_PID,
+ NULL,
+ sigdp,
+ task_flags,
+ ns_pthp,
+ port_sig_callback);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
- return 1;
- }
return 1;
bad_value:
@@ -2155,8 +1715,8 @@ erts_port_output(Process *c_p,
erts_aint32_t sched_flags, busy_flgs, invalid_flags;
int task_flags;
ErtsProc2PortSigCallback port_sig_callback;
- ErlDrvBinary *cbin = NULL;
- ErlIOVec *evp = NULL;
+ ErtsIOQBinary *cbin = NULL;
+ ErtsIOVec *evp = NULL;
char *buf = NULL;
int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
int async_nosuspend;
@@ -2179,7 +1739,7 @@ erts_port_output(Process *c_p,
* Assumes caller have checked that port is valid...
*/
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))
return ((sched_flags & ERTS_PTS_FLG_EXIT)
? ERTS_PORT_OP_DROPPED
@@ -2202,11 +1762,11 @@ erts_port_output(Process *c_p,
}
#endif
if (drv->outputv) {
- ErlIOVec ev;
+ ErtsIOVec ev;
SysIOVec iv[SMALL_WRITE_VEC];
- ErlDrvBinary* bv[SMALL_WRITE_VEC];
+ ErtsIOQBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
- ErlDrvBinary** bvp;
+ ErtsIOQBinary** bvp;
int vsize;
Uint csize;
Uint pvsize;
@@ -2214,18 +1774,19 @@ erts_port_output(Process *c_p,
Uint blimit;
size_t iov_offset, binv_offset, alloc_size;
- if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
+ if (erts_ioq_iodata_vec_len(list, &vsize, &csize, &pvsize, &pcsize,
+ &size, ERL_SMALL_IO_BIN_LIMIT))
goto bad_value;
iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
binv_offset = iov_offset;
binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
alloc_size = binv_offset;
- alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+ alloc_size += (vsize+1)*sizeof(ErtsIOQBinary *);
if (try_call && vsize < SMALL_WRITE_VEC) {
- ivp = ev.iov = iv;
- bvp = ev.binv = bv;
+ ivp = ev.common.iov = iv;
+ bvp = ev.common.binv = bv;
evp = &ev;
}
else {
@@ -2236,9 +1797,9 @@ erts_port_output(Process *c_p,
sigdp = erts_port_task_alloc_p2p_sig_data_extra(
alloc_size, (void**)&ptr);
}
- evp = (ErlIOVec *) ptr;
- ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
- bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ evp = (ErtsIOVec *) ptr;
+ ivp = evp->driver.iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->common.binv = (ErtsIOQBinary **) (ptr + binv_offset);
}
/* To pack or not to pack (small binaries) ...? */
@@ -2254,23 +1815,26 @@ erts_port_output(Process *c_p,
}
/* Use vsize and csize from now on */
- cbin = driver_alloc_binary(csize);
- if (!cbin)
- erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ if (csize) {
+ cbin = (ErtsIOQBinary *)driver_alloc_binary(csize);
+ if (!cbin)
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
+ }
/* Element 0 is reserved for the driver to add a header block */
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (evp->vsize < 0) {
+ evp->driver.vsize = erts_ioq_iodata_to_vec(list, ivp+1, bvp+1,
+ cbin, blimit, 1);
+ if (evp->driver.vsize < 0) {
if (evp != &ev) {
if (try_call)
erts_free(ERTS_ALC_T_TMP, evp);
else
erts_port_task_free_p2p_sig_data(sigdp);
}
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
goto bad_value;
}
#if 0
@@ -2278,19 +1842,19 @@ erts_port_output(Process *c_p,
be falsified during the emulator test suites. */
ASSERT(evp->vsize == vsize);
#endif
- evp->vsize++;
- evp->size = size; /* total size */
+ evp->driver.vsize++;
+ evp->driver.size = size; /* total size */
if (!try_call) {
int i;
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
- if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ for (i = 1; i < evp->driver.vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(&bvp[i]->driver);
}
else {
int i;
- ErlIOVec *new_evp;
+ ErtsIOVec *new_evp;
ErtsTryImmDrvCallResult try_call_res;
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
@@ -2313,14 +1877,14 @@ erts_port_output(Process *c_p,
from,
prt,
drv,
- evp);
+ &evp->driver);
if (force_immediate_call)
finalize_force_imm_drv_call(&try_call_state);
else
finalize_imm_drv_call(&try_call_state);
/* Fall through... */
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
if (evp != &ev) {
ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
@@ -2334,7 +1898,7 @@ erts_port_output(Process *c_p,
sched_flags = try_call_state.sched_flags;
if (async_nosuspend
&& (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))) {
- driver_free_binary(cbin);
+ driver_free_binary(&cbin->driver);
if (evp != &ev) {
ASSERT(!sigdp);
erts_free(ERTS_ALC_T_TMP, evp);
@@ -2349,9 +1913,9 @@ erts_port_output(Process *c_p,
}
/* Need to increase refc on all binaries */
- for (i = 1; i < evp->vsize; i++)
+ for (i = 1; i < evp->driver.vsize; i++)
if (bvp[i])
- driver_binary_inc_refc(bvp[i]);
+ driver_binary_inc_refc(&bvp[i]->driver);
/* The port task and iovec are allocated in the
same structure as an optimization. This
@@ -2364,18 +1928,18 @@ erts_port_output(Process *c_p,
if (evp != &ev) {
/* Copy from TMP alloc to port task */
sys_memcpy((void *) new_evp, (void *) evp, alloc_size);
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ bvp = new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2384,24 +1948,24 @@ erts_port_output(Process *c_p,
else { /* from stack allocated structure; offsets may differ */
sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec));
- new_evp->iov = (SysIOVec *) (((char *) new_evp)
- + iov_offset);
- sys_memcpy((void *) new_evp->iov,
- (void *) evp->iov,
- evp->vsize * sizeof(SysIOVec));
- new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
- + binv_offset);
- sys_memcpy((void *) new_evp->binv,
- (void *) evp->binv,
- evp->vsize * sizeof(ErlDrvBinary *));
+ new_evp->driver.iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ sys_memcpy((void *) new_evp->driver.iov,
+ (void *) evp->driver.iov,
+ evp->driver.vsize * sizeof(SysIOVec));
+ new_evp->common.binv = (ErtsIOQBinary **) (((char *) new_evp)
+ + binv_offset);
+ sys_memcpy((void *) new_evp->common.binv,
+ (void *) evp->common.binv,
+ evp->driver.vsize * sizeof(ErtsIOQBinary *));
#ifdef DEBUG
- ASSERT(new_evp->vsize == evp->vsize);
- ASSERT(new_evp->size == evp->size);
- for (i = 0; i < evp->vsize; i++) {
- ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
- ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
- ASSERT(new_evp->binv[i] == evp->binv[i]);
+ ASSERT(new_evp->driver.vsize == evp->driver.vsize);
+ ASSERT(new_evp->driver.size == evp->driver.size);
+ for (i = 0; i < evp->driver.vsize; i++) {
+ ASSERT(new_evp->driver.iov[i].iov_len == evp->driver.iov[i].iov_len);
+ ASSERT(new_evp->driver.iov[i].iov_base == evp->driver.iov[i].iov_base);
+ ASSERT(new_evp->driver.binv[i] == evp->driver.binv[i]);
}
#endif
@@ -2412,8 +1976,8 @@ erts_port_output(Process *c_p,
sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
sigdp->u.outputv.from = from;
- sigdp->u.outputv.evp = evp;
- sigdp->u.outputv.cbinp = cbin;
+ sigdp->u.outputv.evp = &evp->driver;
+ sigdp->u.outputv.cbinp = &cbin->driver;
port_sig_callback = port_sig_outputv;
}
else {
@@ -2554,15 +2118,11 @@ erts_port_output(Process *c_p,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
return res;
}
if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) {
- sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_acqb(&prt->sched.flags);
if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) {
if (async_nosuspend)
erts_port_task_tmp_handle_detach(ns_pthp);
@@ -2611,11 +2171,11 @@ call_deliver_port_exit(int bang_op,
}
if (broken_link) {
- ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
- if (lnk)
- erts_destroy_link(lnk);
- else
+ ErtsLink *lnk = erts_link_tree_lookup(ERTS_P_LINKS(prt), from);
+ if (!lnk)
return ERTS_PORT_OP_DROPPED;
+ erts_link_tree_delete(&ERTS_P_LINKS(prt), lnk);
+ erts_link_release(lnk);
}
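
The broken_link path now resolves the link node before doing anything destructive: look it up, return ERTS_PORT_OP_DROPPED on a miss, and only then delete and release. A compact sketch of that lookup-first ordering, with a toy search tree:

    #include <stddef.h>

    typedef struct node { struct node *l, *r; long key; } Node;   /* toy */

    static Node *tree_lookup(Node *t, long key) {
        while (t != NULL && t->key != key)
            t = key < t->key ? t->l : t->r;
        return t;   /* NULL on a miss: caller can bail before mutating */
    }
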
if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
@@ -2736,21 +2296,14 @@ erts_port_exit(Process *c_p,
&bp->off_heap);
}
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p ? c_p->common.id : from,
- refp,
- sigdp,
- 0,
- NULL,
- port_sig_exit);
-
- if (res == ERTS_PORT_OP_DROPPED) {
- if (bp)
- free_message_buffer(bp);
- }
-
- return res;
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ NULL,
+ port_sig_exit);
}
static ErtsPortOpResult
@@ -2791,27 +2344,45 @@ set_port_connected(int bang_op,
#endif
}
else { /* Port BIF operation */
- Process *rp = erts_proc_lookup_raw(connect);
- if (!rp)
- return ERTS_PORT_OP_DROPPED;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PROC_IS_EXITING(rp)) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- return ERTS_PORT_OP_DROPPED;
- }
+ int created;
+ ErtsLink *lnk;
+
+ if (is_not_internal_pid(connect))
+ return ERTS_PORT_OP_DROPPED;
+
+ lnk = erts_link_tree_lookup_create(&ERTS_P_LINKS(prt), &created,
+ ERTS_LNK_TYPE_PORT, prt->common.id,
+ connect);
+ if (created) {
+ ErtsLinkData *ldp;
+ ErtsLink *olnk = erts_link_to_other(lnk, &ldp);
+ ASSERT(olnk->other.item == prt->common.id);
+ if (!erts_proc_sig_send_link(NULL, connect, olnk)) {
+ erts_link_tree_delete(&ERTS_P_LINKS(prt), lnk);
+ erts_link_release_both(ldp);
+ return ERTS_PORT_OP_DROPPED;
+ }
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, connect);
+ }
- erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id);
- erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_connect)) {
+ Eterm old_connected = ERTS_PORT_GET_CONNECTED(prt);
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
- if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, 0, rp, am_getting_linked, prt->common.id);
+ dtrace_pid_str(old_connected, process_str);
+ erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
+ "%T", prt->common.id);
+ dtrace_pid_str(connect, newprocess_str);
+ DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ }
+#endif
ERTS_PORT_SET_CONNECTED(prt, connect);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-
- if (IS_TRACED_FL(prt, F_TRACE_PORTS))
- trace_port(prt, am_getting_linked, connect);
if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
trace_port_receive(prt, from, am_connect, connect);
if (IS_TRACED_FL(prt, F_TRACE_SEND)) {
@@ -2819,18 +2390,6 @@ set_port_connected(int bang_op,
trace_port_send(prt, from, TUPLE2(hp, prt->common.id, am_connected), 1);
}
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(port_connect)) {
- DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
-
- dtrace_pid_str(connect, process_str);
- erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)), "%T", prt->common.id);
- dtrace_proc_str(rp, newprocess_str);
- DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
- }
-#endif
}
return ERTS_PORT_OP_DONE;
@@ -2918,13 +2477,24 @@ erts_port_connect(Process *c_p,
}
static void
-port_unlink(Port *prt, Eterm from)
+port_unlink(Port *prt, ErtsLink *lnk)
{
- ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
- if (lnk) {
+ ErtsLinkData *ldp;
+ ErtsLink *dlnk, *llnk;
+
+ llnk = erts_link_to_other(lnk, &ldp);
+ dlnk = erts_link_tree_key_delete(&ERTS_P_LINKS(prt), llnk);
+ if (!dlnk)
+ erts_link_release(lnk);
+ else {
if (IS_TRACED_FL(prt, F_TRACE_PORTS))
- trace_port(prt, am_getting_unlinked, from);
- erts_destroy_link(lnk);
+ trace_port(prt, am_getting_unlinked, llnk->other.item);
+ if (dlnk == llnk)
+ erts_link_release_both(ldp);
+ else {
+ erts_link_release(lnk);
+ erts_link_release(dlnk);
+ }
}
}
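
port_unlink is rewritten around the two-half link representation: erts_link_to_other maps the half carried in the signal to the port's own half, and the release calls decide whether one half or the whole record dies. A toy model of one allocation holding both halves (the spirit of ErtsLinkData, not its actual layout):

    #include <stdlib.h>

    typedef struct link_half { struct link_data *data; } LinkHalf;
    typedef struct link_data { int refc; LinkHalf a, b; } LinkData;

    static LinkData *link_create(void) {
        LinkData *d = malloc(sizeof *d);
        if (d) { d->refc = 2; d->a.data = d; d->b.data = d; }
        return d;
    }

    static void link_release(LinkHalf *h) {   /* drop one half */
        if (--h->data->refc == 0)
            free(h->data);                    /* both halves gone */
    }
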
@@ -2932,14 +2502,14 @@ static int
port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
{
if (op == ERTS_PROC2PORT_SIG_EXEC)
- port_unlink(prt, sigdp->u.unlink.from);
+ port_unlink(prt, sigdp->u.unlink.lnk);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_UNLINK;
}
ErtsPortOpResult
-erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
+erts_port_unlink(Process *c_p, Port *prt, ErtsLink *lnk, Eterm *refp)
{
ErtsProc2PortSigData *sigdp;
ErtsTryImmDrvCallState try_call_state
@@ -2952,7 +2522,7 @@ erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK:
- port_unlink(prt, from);
+ port_unlink(prt, lnk);
finalize_imm_drv_call(&try_call_state);
BUMP_REDS(c_p, ERTS_PORT_REDS_UNLINK);
return ERTS_PORT_OP_DONE;
@@ -2965,11 +2535,12 @@ erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
sigdp = erts_port_task_alloc_p2p_sig_data();
sigdp->flags = ERTS_P2P_SIG_TYPE_UNLINK;
- sigdp->u.unlink.from = from;
-
+ sigdp->u.unlink.port_id = prt->common.id;
+ sigdp->u.unlink.lnk = lnk;
+
return erts_schedule_proc2port_signal(c_p,
prt,
- c_p ? c_p->common.id : from,
+ c_p->common.id,
refp,
sigdp,
0,
@@ -2978,45 +2549,24 @@ erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
}
static void
-port_link_failure(Eterm port_id, Eterm linker)
+port_link_failure(Eterm port_id, ErtsLink *lnk)
{
- Process *rp;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
- ASSERT(is_internal_pid(linker));
- rp = erts_pid2proc(NULL, 0, linker, rp_locks);
- if (rp) {
- ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
- if (rlnk) {
- int xres = erts_send_exit_signal(NULL,
- port_id,
- rp,
- &rp_locks,
- am_noproc,
- NIL,
- NULL,
- 0);
- if (xres >= 0) {
- /* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
- }
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
+ erts_proc_sig_send_link_exit(NULL, port_id, lnk, am_noproc, NIL);
}
static void
-port_link(Port *prt, erts_aint32_t state, Eterm to)
+port_link(Port *prt, erts_aint32_t state, ErtsLink *lnk)
{
- if (IS_TRACED_FL(prt, F_TRACE_PORTS))
- trace_port(prt, am_getting_linked, to);
- if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
- erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to);
- } else {
- port_link_failure(prt->common.id, to);
- if (IS_TRACED_FL(prt, F_TRACE_PORTS))
- trace_port(prt, am_unlink, to);
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ port_link_failure(prt->common.id, lnk);
+ else {
+ ErtsLink *rlnk;
+ rlnk = erts_link_tree_insert_addr_replace(&ERTS_P_LINKS(prt),
+ lnk);
+ if (rlnk)
+ erts_link_release(rlnk);
+ else if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, lnk->other.item);
}
}
@@ -3024,17 +2574,16 @@ static int
port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
{
if (op == ERTS_PROC2PORT_SIG_EXEC)
- port_link(prt, state, sigdp->u.link.to);
- else {
- port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
- }
+ port_link(prt, state, sigdp->u.link.lnk);
+ else
+ port_link_failure(sigdp->u.link.port_id, sigdp->u.link.lnk);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_LINK;
}
ErtsPortOpResult
-erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
+erts_port_link(Process *c_p, Port *prt, ErtsLink *lnk, Eterm *refp)
{
ErtsProc2PortSigData *sigdp;
ErtsTryImmDrvCallState try_call_state
@@ -3047,7 +2596,7 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK:
- port_link(prt, try_call_state.state, to);
+ port_link(prt, try_call_state.state, lnk);
finalize_imm_drv_call(&try_call_state);
BUMP_REDS(c_p, ERTS_PORT_REDS_LINK);
return ERTS_PORT_OP_DONE;
@@ -3060,12 +2609,12 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
sigdp = erts_port_task_alloc_p2p_sig_data();
sigdp->flags = ERTS_P2P_SIG_TYPE_LINK;
- sigdp->u.link.port = prt->common.id;
- sigdp->u.link.to = to;
+ sigdp->u.link.port_id = prt->common.id;
+ sigdp->u.link.lnk = lnk;
return erts_schedule_proc2port_signal(c_p,
prt,
- c_p ? c_p->common.id : to,
+ c_p->common.id,
refp,
sigdp,
0,
@@ -3074,51 +2623,23 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
}
static void
-port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN)
+port_monitor_failure(Eterm port_id, ErtsMonitor *mon)
{
- Process *origin_p;
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
- ASSERT(is_internal_pid(origin));
-
- origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
- if (! origin_p) { return; }
-
- /* Send the DOWN message immediately. Ref is made on the fly because
- * caller has never seen it yet. */
- erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN,
- am_port, port_id, am_noproc);
- erts_smp_proc_unlock(origin_p, p_locks);
+ erts_proc_sig_send_monitor_down(mon, am_noproc);
}
/* Origin wants to monitor port Prt. State contains any error that has
 * just occurred. Name is either NIL or an atom, if the user monitors
 * a port by name. Ref is a premade reference that will be returned to
 * the user */
static void
-port_monitor(Port *prt, erts_aint32_t state, Eterm origin,
- Eterm name, Eterm ref)
+port_monitor(Port *prt, erts_aint32_t state, ErtsMonitor *mon)
{
- Eterm name_or_nil = is_atom(name) ? name : NIL;
-
- ASSERT(is_pid(origin));
- ASSERT(is_atom(name) || is_port(name) || name == NIL);
- ASSERT(is_internal_ref(ref));
-
- if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
-
- Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
- if (! origin_p) {
- goto failure;
- }
- erts_add_monitor(&ERTS_P_MONITORS(origin_p), MON_ORIGIN, ref,
- prt->common.id, name_or_nil);
- erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref,
- origin, name_or_nil);
-
- erts_smp_proc_unlock(origin_p, p_locks);
- } else {
-failure:
- port_monitor_failure(prt->common.id, origin, ref);
+ ASSERT(prt);
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ port_monitor_failure(prt->common.id, mon);
+ else {
+ ASSERT(erts_monitor_is_target(mon));
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(prt), mon);
}
}
@@ -3126,24 +2647,11 @@ static int
port_sig_monitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
- Eterm ref = make_internal_ref(&hp);
- write_ref_thing(hp, sigdp->ref[0], sigdp->ref[1], sigdp->ref[2]);
-
- if (op == ERTS_PROC2PORT_SIG_EXEC) {
- /* erts_add_monitor call inside port_monitor will copy ref from hp */
- port_monitor(prt, state,
- sigdp->u.monitor.origin,
- sigdp->u.monitor.name,
- ref);
- } else {
- port_monitor_failure(sigdp->u.monitor.name,
- sigdp->u.monitor.origin,
- ref);
- }
- if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
- }
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_monitor(prt, state, sigdp->u.monitor.mon);
+ else
+ port_monitor_failure(sigdp->u.monitor.port_id,
+ sigdp->u.monitor.mon);
return ERTS_PORT_REDS_MONITOR;
}
@@ -3151,95 +2659,63 @@ port_sig_monitor(Port *prt, erts_aint32_t state, int op,
 * a reference (ref may be rewritten so that it can additionally serve as a
 * signal id). Name is an atom if the user monitors the port by name, or NIL */
ErtsPortOpResult
-erts_port_monitor(Process *origin, Port *port, Eterm name, Eterm *refp)
+erts_port_monitor(Process *c_p, Port *port, ErtsMonitor *mon)
{
ErtsProc2PortSigData *sigdp;
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
- origin, port, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ c_p,
+ port,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
0,
- 0, /* trap_ref is always set so !trap_ref always is false */
+ !0,
am_monitor);
- ASSERT(origin);
+ ASSERT(c_p);
ASSERT(port);
- ASSERT(is_atom(name) || is_port(name));
- ASSERT(refp);
+ ASSERT(mon);
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK:
- port_monitor(port, try_call_state.state, origin->common.id, name, *refp);
+ port_monitor(port, try_call_state.state, mon);
finalize_imm_drv_call(&try_call_state);
- BUMP_REDS(origin, ERTS_PORT_REDS_MONITOR);
+ BUMP_REDS(c_p, ERTS_PORT_REDS_MONITOR);
return ERTS_PORT_OP_DONE;
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- return ERTS_PORT_OP_BADARG;
+ return ERTS_PORT_OP_DROPPED;
default:
break; /* Schedule call instead... */
}
sigdp = erts_port_task_alloc_p2p_sig_data();
sigdp->flags = ERTS_P2P_SIG_TYPE_MONITOR;
- sigdp->u.monitor.origin = origin->common.id;
- sigdp->u.monitor.name = name; /* either named monitor, or port id */
+ sigdp->u.monitor.port_id = port->common.id;
+ sigdp->u.monitor.mon = mon;
/* Ref contents will be initialized here */
- return erts_schedule_proc2port_signal(origin, port, origin->common.id,
- refp, sigdp, 0, NULL,
+ return erts_schedule_proc2port_signal(c_p,
+ port,
+ c_p->common.id,
+ NULL,
+ sigdp,
+ 0,
+ NULL,
port_sig_monitor);
}
-static void
-port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref)
-{
- Process *origin_p;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
- ErtsMonitor *mon1;
- ASSERT(is_internal_pid(origin));
-
- origin_p = erts_pid2proc(NULL, 0, origin, rp_locks);
- if (! origin_p) { return; }
-
- /* do not send any DOWN messages, drop monitors on process */
- mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p), ref);
- if (mon1 != NULL) {
- erts_destroy_monitor(mon1);
- }
-
- erts_smp_proc_unlock(origin_p, rp_locks);
-}
-
/* Origin wants to demonitor port Prt. State contains any error that has
 * just occurred. Ref is the reference to the monitor */
static void
-port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref)
+port_demonitor(Port *port, erts_aint32_t state, ErtsMonitor *mon)
{
- ASSERT(port);
- ASSERT(is_pid(origin));
- ASSERT(is_internal_ref(ref));
-
- if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
- Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
- if (origin_p) {
- ErtsMonitor *mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p),
- ref);
- if (mon1 != NULL) {
- erts_destroy_monitor(mon1);
- }
- }
- if (1) {
- ErtsMonitor *mon2 = erts_remove_monitor(&ERTS_P_MONITORS(port),
- ref);
- if (mon2 != NULL) {
- erts_destroy_monitor(mon2);
- }
- }
- if (origin_p) { /* when origin is dying, it won't be found */
- erts_smp_proc_unlock(origin_p, p_locks);
- }
- } else {
- port_demonitor_failure(port->common.id, origin, ref);
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
+ ASSERT(port && mon);
+ ASSERT(erts_monitor_is_origin(mon));
+ if (!erts_monitor_is_in_table(&mdp->target))
+ erts_monitor_release(mon);
+ else {
+ erts_monitor_list_delete(&ERTS_P_LT_MONITORS(port), &mdp->target);
+ erts_monitor_release_both(mdp);
}
}
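
port_demonitor applies the same two-half idea to monitors: erts_monitor_to_data reaches the shared record, and whether the target half is still in a table decides between releasing one half and releasing both. A toy refcount version of that decision:

    #include <stdlib.h>

    typedef struct { int refc; int target_in_table; } MonData;   /* toy */

    static void release(MonData *md, int halves) {
        if ((md->refc -= halves) == 0)
            free(md);
    }

    static void demonitor(MonData *md) {
        if (!md->target_in_table)
            release(md, 1);          /* only the origin half remains */
        else {
            md->target_in_table = 0; /* stand-in for the list delete */
            release(md, 2);          /* drop both halves at once */
        }
    }
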
@@ -3247,73 +2723,47 @@ static int
port_sig_demonitor(Port *prt, erts_aint32_t state, int op,
ErtsProc2PortSigData *sigdp)
{
- Eterm hp[REF_THING_SIZE];
- Eterm ref = make_internal_ref(&hp);
- write_ref_thing(hp, sigdp->u.demonitor.ref[0],
- sigdp->u.demonitor.ref[1],
- sigdp->u.demonitor.ref[2]);
- if (op == ERTS_PROC2PORT_SIG_EXEC) {
- port_demonitor(prt, state, sigdp->u.demonitor.origin, ref);
- } else {
- port_demonitor_failure(sigdp->u.demonitor.name,
- sigdp->u.demonitor.origin,
- ref);
- }
- if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
- }
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_demonitor(prt, state, sigdp->u.demonitor.mon);
+ else
+ erts_monitor_release(sigdp->u.demonitor.mon);
return ERTS_PORT_REDS_DEMONITOR;
}
-/* Removes monitor between origin and target, identified by ref.
- * Mode defines normal or relaxed demonitor rules (process is at death) */
-ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
- Port *target, Eterm ref,
- Eterm *trap_ref)
+ErtsPortOpResult
+erts_port_demonitor(Process *c_p, Port *prt, ErtsMonitor *mon)
{
- Process *c_p = mode == ERTS_PORT_DEMONITOR_NORMAL ? origin : NULL;
ErtsProc2PortSigData *sigdp;
ErtsTryImmDrvCallState try_call_state
= ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
c_p,
- target, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ prt, ERTS_PORT_SFLGS_INVALID_LOOKUP,
0,
- !trap_ref,
+ !0,
am_demonitor);
- ASSERT(origin);
- ASSERT(target);
- ASSERT(is_internal_ref(ref));
+ ASSERT(c_p && prt && mon);
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK:
- port_demonitor(target, try_call_state.state, origin->common.id, ref);
+ port_demonitor(prt, try_call_state.state, mon);
finalize_imm_drv_call(&try_call_state);
- if (mode == ERTS_PORT_DEMONITOR_NORMAL) {
- BUMP_REDS(origin, ERTS_PORT_REDS_DEMONITOR);
- }
+ BUMP_REDS(c_p, ERTS_PORT_REDS_DEMONITOR);
return ERTS_PORT_OP_DONE;
case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
- return ERTS_PORT_OP_BADARG;
+ return ERTS_PORT_OP_DROPPED;
default:
break; /* Schedule call instead... */
}
sigdp = erts_port_task_alloc_p2p_sig_data();
sigdp->flags = ERTS_P2P_SIG_TYPE_DEMONITOR;
- sigdp->u.demonitor.origin = origin->common.id;
- sigdp->u.demonitor.name = target->common.id;
- {
- RefThing *reft = ref_thing_ptr(ref);
- /* Start from 1 skip ref arity */
- sys_memcpy(sigdp->u.demonitor.ref,
- internal_thing_ref_numbers(reft),
- sizeof(sigdp->u.demonitor.ref));
- }
+ sigdp->u.demonitor.port_id = prt->common.id;
+ sigdp->u.demonitor.mon = mon;
/* Ref contents will be initialized here */
- return erts_schedule_proc2port_signal(c_p, target, origin->common.id,
- trap_ref, sigdp, 0, NULL,
+ return erts_schedule_proc2port_signal(c_p, prt, c_p->common.id,
+ NULL, sigdp, 0, NULL,
port_sig_demonitor);
}
@@ -3322,11 +2772,13 @@ init_ack_send_reply(Port *port, Eterm resp)
{
if (!is_internal_port(resp)) {
- Process *rp = erts_proc_lookup_raw(port->async_open_port->to);
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- erts_remove_link(&ERTS_P_LINKS(port), port->async_open_port->to);
- erts_remove_link(&ERTS_P_LINKS(rp), port->common.id);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ Eterm proc = port->async_open_port->to;
+ ErtsLink *lnk = erts_link_tree_lookup(ERTS_P_LINKS(port),
+ proc);
+ if (lnk) {
+ erts_link_tree_delete(&ERTS_P_LINKS(port), lnk);
+ erts_proc_sig_send_unlink(NULL, lnk);
+ }
}
port_sched_op_reply(port->async_open_port->to,
port->async_open_port->ref,
@@ -3353,7 +2805,7 @@ erl_drv_init_ack(ErlDrvPort ix, ErlDrvData res) {
break;
case -2: {
char *str = erl_errno_id(errno);
- resp = erts_atom_put((byte *) str, strlen(str),
+ resp = erts_atom_put((byte *) str, sys_strlen(str),
ERTS_ATOM_ENC_LATIN1, 1);
break;
}
@@ -3388,11 +2840,11 @@ void erts_init_io(int port_tab_size,
int port_tab_size_ignore_files,
int legacy_port_tab)
{
- ErlDrvEntry** dp;
+ ErtsStaticDriver* dp;
UWord common_element_size;
- erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ drv_list_rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ drv_list_rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED;
erts_atomic64_init_nob(&bytes_in, 0);
erts_atomic64_init_nob(&bytes_out, 0);
@@ -3400,11 +2852,9 @@ void erts_init_io(int port_tab_size,
common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ));
common_element_size += 10; /* name */
-#ifdef ERTS_SMP
common_element_size += sizeof(erts_mtx_t);
init_xports_list_alloc();
-#endif
pdl_init();
@@ -3419,13 +2869,12 @@ void erts_init_io(int port_tab_size,
else if (port_tab_size < ERTS_MIN_PORTS)
port_tab_size = ERTS_MIN_PORTS;
- erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
- &drv_list_rwmtx_opts,
- "driver_list");
+ erts_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key,
+ erts_tsd_key_create(&driver_list_lock_status_key,
"erts_driver_list_lock_status_key");
- erts_smp_tsd_key_create(&driver_list_last_error_key,
+ erts_tsd_key_create(&driver_list_last_error_key,
"erts_driver_list_last_error_key");
erts_ptab_init_table(&erts_port,
@@ -3433,15 +2882,15 @@ void erts_init_io(int port_tab_size,
NULL,
(ErtsPTabElementCommon *) &erts_invalid_port.common,
port_tab_size,
- common_element_size, /* Doesn't need to be excact */
+ common_element_size, /* Doesn't need to be exact */
"port_table",
legacy_port_tab,
1);
sys_init_io();
- erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, (void *) 1);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
@@ -3450,76 +2899,101 @@ void erts_init_io(int port_tab_size,
init_driver(&forker_driver, &forker_driver_entry, NULL);
#endif
erts_init_static_drivers();
- for (dp = driver_tab; *dp != NULL; dp++)
- erts_add_driver_entry(*dp, NULL, 1);
+ for (dp = driver_tab; dp->de != NULL; dp++)
+ erts_add_driver_entry(dp->de, NULL, 1, dp->taint);
- erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, NULL);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
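
The static driver table changes shape: instead of a NULL-terminated array of ErlDrvEntry pointers, it is now an array of ErtsStaticDriver structs whose sentinel is a NULL .de field, leaving room for per-driver metadata such as the taint flag passed to erts_add_driver_entry. A runnable toy with invented entries:

    #include <stdio.h>

    typedef struct { const char *name; } DrvEntry;               /* toy */
    typedef struct { DrvEntry *de; int taint; } StaticDriver;    /* cf. ErtsStaticDriver */

    static DrvEntry fd_drv = { "fd" }, spawn_drv = { "spawn" };
    static StaticDriver tab[] = { { &fd_drv, 0 }, { &spawn_drv, 1 }, { NULL, 0 } };

    int main(void) {
        StaticDriver *dp;
        for (dp = tab; dp->de != NULL; dp++)     /* sentinel: .de == NULL */
            printf("%s taint=%d\n", dp->de->name, dp->taint);
        return 0;
    }
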
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+#if defined(ERTS_ENABLE_LOCK_COUNT)
+static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
- if (enable)
- erts_lcnt_init_lock_x(&dp->lock->lcnt,
- "driver_lock",
- ERTS_LCNT_LT_MUTEX,
- erts_atom_put((byte*)dp->name,
- sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1,
- 1));
-
- else
- erts_lcnt_destroy_lock(&dp->lock->lcnt);
-
+ if (enable) {
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock",
+ dp->name_atom, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&dp->lock->lcnt);
+ }
}
}
-static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+static void lcnt_enable_port_lock_count(Port *prt, int enable)
{
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
- if (!enable) {
- erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_destroy_lock(&prt->lock->lcnt);
+
+ if(enable) {
+ ErlDrvPDL pdl = prt->port_data_lock;
+
+ erts_lcnt_install_new_lock_info(&prt->sched.mtx.lcnt, "port_sched_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+
+ if(pdl) {
+ erts_lcnt_install_new_lock_info(&pdl->mtx.lcnt, "port_data_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_install_new_lock_info(&prt->lock->lcnt, "port_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+ } else {
+ erts_lcnt_uninstall(&prt->sched.mtx.lcnt);
+
+ if(prt->port_data_lock) {
+ erts_lcnt_uninstall(&prt->port_data_lock->mtx.lcnt);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_uninstall(&prt->lock->lcnt);
+ }
}
- else {
- erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
- "port_sched_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_init_lock_x(&prt->lock->lcnt,
- "port_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
+}
+
+void erts_lcnt_update_driver_locks(int enable) {
+ erts_driver_t *driver;
+
+ lcnt_enable_driver_lock_count(&vanilla_driver, enable);
+ lcnt_enable_driver_lock_count(&spawn_driver, enable);
+#ifndef __WIN32__
+ lcnt_enable_driver_lock_count(&forker_driver, enable);
+#endif
+ lcnt_enable_driver_lock_count(&fd_driver, enable);
+
+ erts_rwmtx_rlock(&erts_driver_list_lock);
+
+ for (driver = driver_list; driver; driver = driver->next) {
+ lcnt_enable_driver_lock_count(driver, enable);
}
+
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
-void erts_lcnt_enable_io_lock_count(int enable) {
- erts_driver_t *dp;
- int ix, max = erts_ptab_max(&erts_port);
- Port *prt;
+void erts_lcnt_update_port_locks(int enable) {
+ int i, max;
- for (ix = 0; ix < max; ix++) {
- if ((prt = erts_pix2port(ix)) != NULL) {
- lcnt_enable_port_lock_count(prt, enable);
+ max = erts_ptab_max(&erts_port);
+
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Port *port;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ port = erts_pix2port(i);
+
+ if(port != NULL) {
+ lcnt_enable_port_lock_count(port, enable);
}
- } /* for all ports */
- lcnt_enable_drv_lock_count(&vanilla_driver, enable);
- lcnt_enable_drv_lock_count(&spawn_driver, enable);
-#ifndef __WIN32__
- lcnt_enable_drv_lock_count(&forker_driver, enable);
-#endif
- lcnt_enable_drv_lock_count(&fd_driver, enable);
- /* enable lock counting in all drivers */
- for (dp = driver_list; dp; dp = dp->next) {
- lcnt_enable_drv_lock_count(dp, enable);
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
}
-} /* enable/disable lock counting of ports */
-#endif /* defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */
+}
+
+#endif /* defined(ERTS_ENABLE_LOCK_COUNT) */
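
erts_lcnt_update_port_locks brackets each table probe with erts_thr_progress_unmanaged_delay/_continue, holding off deferred deallocation while the Port slot is inspected. A toy reader-count rendition of that pin/unpin shape (the real mechanism is the thread-progress API, not a counter):

    #include <stdatomic.h>

    static atomic_int pinned_readers;   /* toy stand-in for a delay handle */

    static void pin(void)   { atomic_fetch_add(&pinned_readers, 1); }
    static void unpin(void) { atomic_fetch_sub(&pinned_readers, 1); }
    /* A reclaimer would wait until pinned_readers == 0 before freeing slots. */
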
+
/*
* Buffering of data when using line oriented I/O on ports
*/
@@ -3698,12 +3172,10 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
ASSERT(!prt || prt->common.id == sender);
-#ifdef ERTS_SMP
- ASSERT(!prt || erts_lc_is_port_locked(prt));
-#endif
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
ASSERT(is_internal_port(sender) && is_internal_pid(pid));
@@ -3731,7 +3203,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
erts_queue_message(rp, rp_locks, mp, tuple, sender);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
@@ -3762,8 +3234,8 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
int scheduler = erts_get_scheduler_id() != 0;
int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
need = 3 + 3 + 2*hlen;
@@ -3789,25 +3261,11 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
listp = buf_to_intlist(&hp, buf, len, listp);
} else if (buf != NULL) {
- ProcBin* pb;
- Binary* bptr;
-
- bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
+ Binary* bptr = erts_bin_nrml_alloc(len);
sys_memcpy(bptr->orig_bytes, buf, len);
- pb = (ProcBin *) hp;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = len;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header*)pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
+ listp = erts_build_proc_bin(ohp, hp, bptr);
hp += PROC_BIN_SIZE;
-
- OH_OVERHEAD(ohp, pb->size / sizeof(Eterm));
- listp = make_binary(pb);
}
/* Prepend the header */
@@ -3828,10 +3286,9 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
if (trace_send)
trace_port_send(prt, to, tuple, 1);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
}
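
Eight lines of manual ProcBin setup (thing_word, size, off-heap chaining, overhead accounting) collapse into erts_build_proc_bin; the same consolidation appears again in write_port_control_result further down. A toy constructor owning both invariants, chaining and accounting, under invented types:

    #include <stddef.h>

    typedef struct ohh { struct ohh *next; } OffHeapHdr;
    typedef struct { OffHeapHdr hdr; size_t size; void *bin; } ToyProcBin;
    typedef struct { OffHeapHdr *first; size_t overhead; } OffHeap;

    static ToyProcBin *build_proc_bin(OffHeap *oh, ToyProcBin *pb,
                                      void *bin, size_t size) {
        pb->size = size;
        pb->bin = bin;
        pb->hdr.next = oh->first;              /* chain into off-heap list */
        oh->first = &pb->hdr;
        oh->overhead += size / sizeof(void *); /* account GC pressure */
        return pb;
    }
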
@@ -3866,7 +3323,7 @@ static void flush_linebuf_messages(Port *prt, erts_aint32_t state)
LineBufContext lc;
int ret;
- ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
if (!prt)
return;
@@ -3910,8 +3367,8 @@ deliver_vec_message(Port* prt, /* Port */
erts_aint32_t state;
int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
/*
* Check arguments for validity.
@@ -4000,9 +3457,8 @@ deliver_vec_message(Port* prt, /* Port */
if (IS_TRACED_FL(prt, F_TRACE_SEND))
trace_port_send(prt, to, tuple, 1);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
}
@@ -4037,8 +3493,8 @@ static void flush_port(Port *p)
{
int fpe_was_unmasked;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->flush != NULL) {
ERTS_MSACC_PUSH_STATE_M();
@@ -4070,11 +3526,9 @@ static void flush_port(Port *p)
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_out, am_flush);
}
-#ifdef ERTS_SMP
if (p->xports)
erts_port_handle_xports(p);
ASSERT(!p->xports);
-#endif
}
if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0
&& is_port_ioq_empty(p)) {
@@ -4092,11 +3546,12 @@ terminate_port(Port *prt)
erts_aint32_t state;
ErtsPrtSD *psd;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(!ERTS_P_LINKS(prt));
ASSERT(!ERTS_P_MONITORS(prt));
+ ASSERT(!ERTS_P_LT_MONITORS(prt));
/* state may be altered by kill_port() below */
state = erts_atomic32_read_band_nob(&prt->state,
@@ -4135,11 +3590,9 @@ terminate_port(Port *prt)
(*drv->stop)((ErlDrvData)prt->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
-#ifdef ERTS_SMP
if (prt->xports)
erts_port_handle_xports(prt);
ASSERT(!prt->xports);
-#endif
}
if (is_internal_port(send_closed_port_id)
@@ -4147,9 +3600,9 @@ terminate_port(Port *prt)
trace_port_send(prt, connected_id, am_closed, 1);
if(drv->handle != NULL) {
- erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(drv->handle);
- erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
stopq(prt); /* clear queue memory */
if(prt->linebuf != NULL){
@@ -4159,12 +3612,12 @@ terminate_port(Port *prt)
erts_cleanup_port_data(prt);
- psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
+ ASSERT(erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY) == NULL);
+
+ psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
if (psd)
erts_free(ERTS_ALC_T_PRTSD, psd);
- ASSERT(prt->dist_entry == NULL);
-
kill_port(prt);
/*
@@ -4172,7 +3625,7 @@ terminate_port(Port *prt)
* port has been removed from the port table (in kill_port()).
*/
if ((state & ERTS_PORT_SFLG_HALT)
- && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
+ && (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
erts_port_release(prt); /* We will exit and never return */
erts_flush_async_exit(erts_halt_code, "");
}
@@ -4186,145 +3639,25 @@ erts_terminate_port(Port *pp)
terminate_port(pp);
}
-static void port_fire_one_monitor(ErtsMonitor *mon, void *ctx0);
-static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
-{
- switch (mon->type) {
- case MON_ORIGIN: {
- ErtsMonitor *rmon;
- Process *rp;
-
- ASSERT(is_internal_pid(mon->pid));
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- goto done;
- }
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon == NULL) {
- goto done;
- }
- erts_destroy_monitor(rmon);
- } break;
- case MON_TARGET: {
- port_fire_one_monitor(mon, vpsc); /* forward call */
- } break;
- }
- done:
- erts_destroy_monitor(mon);
-}
-
-
-
typedef struct {
- Port *port;
+ Eterm port_id;
Eterm reason;
-} SweepContext;
+} ErtsPortExitContext;
-static void sweep_one_link(ErtsLink *lnk, void *vpsc)
+static void link_port_exit(ErtsLink *lnk, void *vpectxt)
{
- SweepContext *psc = vpsc;
- DistEntry *dep;
- Process *rp;
- Eterm port_id = psc->port->common.id;
-
- ASSERT(lnk->type == LINK_PID);
-
- if (IS_TRACED_FL(psc->port, F_TRACE_PORTS))
- trace_port(psc->port, am_unlink, lnk->pid);
-
- if (is_external_pid(lnk->pid)) {
- dep = external_pid_dist_entry(lnk->pid);
- if(dep != erts_this_dist_entry) {
- ErtsDistLinkData dld;
- ErtsDSigData dsd;
- int code;
- code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
- switch (code) {
- case ERTS_DSIG_PREP_NOT_ALIVE:
- case ERTS_DSIG_PREP_NOT_CONNECTED:
- break;
- case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, port_id, lnk->pid, dep);
- erts_destroy_dist_link(&dld);
- code = erts_dsig_send_exit(&dsd, port_id, lnk->pid,
- psc->reason);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- break;
- default:
- ASSERT(! "Invalid dsig prepare result");
- break;
- }
- }
- } else {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
- ASSERT(is_internal_pid(lnk->pid));
- rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
- if (rp) {
- ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
-
- if (rlnk) {
- int xres = erts_send_exit_signal(NULL,
- port_id,
- rp,
- &rp_locks,
- psc->reason,
- NIL,
- NULL,
- 0);
- if (xres >= 0) {
- if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
- rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
- }
- /* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
- }
- erts_destroy_link(rlnk);
- }
-
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
- erts_destroy_link(lnk);
+ ErtsPortExitContext *pectxt = vpectxt;
+ erts_proc_sig_send_link_exit(NULL, pectxt->port_id,
+ lnk, pectxt->reason, NIL);
}
-static void
-port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
+static void monitor_port_exit(ErtsMonitor *mon, void *vpectxt)
{
- Process *origin;
- ErtsProcLocks origin_locks;
-
- if (mon->type != MON_TARGET || ! is_pid(mon->pid)) {
- return;
- }
- /*
- * Proceed here if someone monitors us, we (port) are the target and
- * origin is some process
- */
- origin_locks = ERTS_PROC_LOCKS_MSG_SEND | ERTS_PROC_LOCK_LINK;
-
- origin = erts_pid2proc(NULL, 0, mon->pid, origin_locks);
- if (origin) {
- DeclareTmpHeapNoproc(lhp,3);
- SweepContext *ctx = (SweepContext *)ctx0;
- ErtsMonitor *rmon;
- Eterm watched = (is_atom(mon->name)
- ? TUPLE2(lhp, mon->name, erts_this_dist_entry->sysname)
- : ctx->port->common.id);
-
- erts_queue_monitor_message(origin, &origin_locks, mon->ref, am_port,
- watched, ctx->reason);
- UnUseTmpHeapNoproc(3);
-
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref);
- erts_smp_proc_unlock(origin, origin_locks);
-
- if (rmon) {
- erts_destroy_monitor(rmon);
- }
- }
+ ErtsPortExitContext *pectxt = vpectxt;
+ if (erts_monitor_is_target(mon))
+ erts_proc_sig_send_monitor_down(mon, pectxt->reason);
+ else
+ erts_proc_sig_send_demonitor(mon);
}
/* 'from' is sending 'this_port' an exit signal (this_port must be internal).
@@ -4341,12 +3674,15 @@ int
erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed,
int drop_normal)
{
- ErtsLink *lnk;
+ ErtsLink *links;
+ ErtsMonitor *monitors;
+ ErtsMonitor *lt_monitors;
Eterm modified_reason;
erts_aint32_t state, set_state_flags;
+ ErtsPortExitContext pectxt;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
modified_reason = (reason == am_kill) ? am_killed : reason;
@@ -4391,28 +3727,43 @@ erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed,
set_busy_port(ERTS_Port2ErlDrvPort(prt), 0);
+ links = ERTS_P_LINKS(prt);
+ ERTS_P_LINKS(prt) = NULL;
+ monitors = ERTS_P_MONITORS(prt);
+ ERTS_P_MONITORS(prt) = NULL;
+ lt_monitors = ERTS_P_LT_MONITORS(prt);
+ ERTS_P_LT_MONITORS(prt) = NULL;
+
if (prt->common.u.alive.reg != NULL)
(void) erts_unregister_name(NULL, 0, prt, prt->common.u.alive.reg->name);
- {
- SweepContext sc = {prt, modified_reason};
- lnk = ERTS_P_LINKS(prt);
- ERTS_P_LINKS(prt) = NULL;
- erts_sweep_links(lnk, &sweep_one_link, &sc);
+ pectxt.port_id = prt->common.id;
+ pectxt.reason = modified_reason;
+
+ if (links)
+ erts_monitor_tree_foreach_delete(&links,
+ link_port_exit,
+ (void *) &pectxt);
+
+ if (monitors || lt_monitors) {
+ DRV_MONITOR_LOCK_PDL(prt);
+ if (monitors)
+ erts_monitor_tree_foreach_delete(&monitors,
+ monitor_port_exit,
+ (void *) &pectxt);
+ if (lt_monitors)
+ erts_monitor_list_foreach_delete(&lt_monitors,
+ monitor_port_exit,
+ (void *) &pectxt);
+ DRV_MONITOR_UNLOCK_PDL(prt);
}
- DRV_MONITOR_LOCK_PDL(prt);
- {
- SweepContext ctx = {prt, modified_reason};
- ErtsMonitor *moni = ERTS_P_MONITORS(prt);
- ERTS_P_MONITORS(prt) = NULL;
- erts_sweep_monitors(moni, &sweep_one_monitor, &ctx);
- }
- DRV_MONITOR_UNLOCK_PDL(prt);
-
- if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && prt->dist_entry) {
- erts_do_net_exits(prt->dist_entry, modified_reason);
- erts_deref_dist_entry(prt->dist_entry);
- prt->dist_entry = NULL;
+
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ ASSERT(dep);
+ erts_do_net_exits(dep, modified_reason);
+ erts_deref_dist_entry(dep);
+ erts_prtsd_set(prt, ERTS_PRTSD_DIST_ENTRY, NULL);
erts_atomic32_read_band_relb(&prt->state,
~ERTS_PORT_SFLG_DISTRIBUTION);
}
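
The hunk above snapshots the link and monitor collections, clears the port's fields, and only then walks the snapshots with the link_port_exit/monitor_port_exit callbacks, so nothing can race onto the port mid-sweep. The detach-then-notify shape, on a toy list rather than the real trees:

    #include <stddef.h>

    typedef struct n { struct n *next; } N;   /* toy list node */

    static void foreach_delete(N **head, void (*f)(N *, void *), void *arg) {
        N *cur = *head;
        *head = NULL;                /* detach first: nothing new can race in */
        while (cur != NULL) {
            N *nx = cur->next;
            f(cur, arg);             /* notify/release this node */
            cur = nx;
        }
    }
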
@@ -4560,8 +3911,7 @@ static void
cleanup_scheduled_control(Binary *binp, char *bufp)
{
if (binp) {
- if (erts_refc_dectest(&binp->refc, 0) == 0)
- erts_bin_free(binp);
+ erts_bin_release(binp);
}
else {
if (bufp)
@@ -4614,17 +3964,9 @@ write_port_control_result(int control_flags,
else {
dbin = (ErlDrvBinary *) resp_bufp;
if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
- ProcBin* pb = (ProcBin *) *hpp;
+ res = erts_build_proc_bin(ohp, *hpp, ErlDrvBinary2Binary(dbin));
*hpp += PROC_BIN_SIZE;
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = dbin->orig_size;
- pb->next = ohp->first;
- ohp->first = (struct erl_off_heap_header *) pb;
- pb->val = ErlDrvBinary2Binary(dbin);
- pb->bytes = (byte*) dbin->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(ohp, dbin->orig_size / sizeof(Eterm));
- return make_binary(pb);
+ return res;
}
resp_bufp = dbin->orig_bytes;
resp_size = dbin->orig_size;
@@ -4708,7 +4050,7 @@ port_sig_control(Port *prt,
prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
goto done;
}
}
@@ -4761,7 +4103,7 @@ erts_port_control(Process* c_p,
int copy;
ErtsProc2PortSigData *sigdp;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & ERTS_PTS_FLG_EXIT)
return ERTS_PORT_OP_BADARG;
@@ -4884,14 +4226,28 @@ erts_port_control(Process* c_p,
ASSERT(!tmp_alloced);
if (*ebinp == HEADER_SUB_BIN)
ebinp = binary_val(((ErlSubBin *) ebinp)->orig);
+
if (*ebinp != HEADER_PROC_BIN)
copy = 1;
else {
- binp = ((ProcBin *) ebinp)->val;
+ ProcBin *pb = (ProcBin *) ebinp;
+ int offset = bufp - pb->val->orig_bytes;
+
+ ASSERT(pb->val->orig_bytes <= bufp
+ && bufp + size <= pb->val->orig_bytes + pb->val->orig_size);
+
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+
+ /* The procbin may have been reallocated, so update bufp */
+ bufp = pb->val->orig_bytes + offset;
+ }
+
+ binp = pb->val;
ASSERT(bufp <= bufp + size);
ASSERT(binp->orig_bytes <= bufp
&& bufp + size <= binp->orig_bytes + binp->orig_size);
- erts_refc_inc(&binp->refc, 1);
+ erts_refc_inc(&binp->intern.refc, 1);
}
}
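
The new writable-binary handling pins bufp as an offset before calling erts_emasculate_writable_binary, since emasculation may reallocate the binary and move orig_bytes. The underlying idiom, keep an offset rather than a pointer across anything that may move the block, as a runnable toy:

    #include <stdlib.h>

    void demo(void) {
        char *base = malloc(64);
        if (!base) return;
        char *bufp = base + 16;
        size_t off = bufp - base;        /* pin the offset, not the pointer */
        char *nb = realloc(base, 128);   /* the block may move */
        if (!nb) { free(base); return; }
        base = nb;
        bufp = base + off;               /* recompute from the new base */
        (void)bufp;
        free(base);
    }
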
@@ -4918,10 +4274,9 @@ erts_port_control(Process* c_p,
0,
NULL,
port_sig_control);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_control(binp, bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5060,11 +4415,11 @@ port_sig_call(Port *prt,
prt);
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
goto done;
}
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
}
}
@@ -5098,7 +4453,7 @@ erts_port_call(Process* c_p,
erts_aint32_t sched_flags;
ErtsProc2PortSigData *sigdp;
- sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ sched_flags = erts_atomic32_read_nob(&prt->sched.flags);
if (sched_flags & ERTS_PTS_FLG_EXIT) {
return ERTS_PORT_OP_BADARG;
}
@@ -5211,10 +4566,9 @@ erts_port_call(Process* c_p,
0,
NULL,
port_sig_call);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_call(bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5317,7 +4671,7 @@ port_sig_info(Port *prt,
prt);
}
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
return ERTS_PORT_REDS_INFO;
}
@@ -5386,7 +4740,7 @@ typedef struct {
Uint sched_id;
Eterm pid;
Uint32 refn[ERTS_REF_NUMBERS];
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
} ErtsIOBytesReq;
static void
@@ -5415,7 +4769,7 @@ reply_io_bytes(void *vreq)
rp_locks = ERTS_PROC_LOCK_MAIN;
}
- hsz = 5 /* 4-tuple */ + REF_THING_SIZE;
+ hsz = 5 /* 4-tuple */ + ERTS_REF_THING_SIZE;
erts_bld_uint64(NULL, &hsz, in);
erts_bld_uint64(NULL, &hsz, out);
@@ -5424,7 +4778,7 @@ reply_io_bytes(void *vreq)
ref = make_internal_ref(hp);
write_ref_thing(hp, req->refn[0], req->refn[1], req->refn[2]);
- hp += REF_THING_SIZE;
+ hp += ERTS_REF_THING_SIZE;
ein = erts_bld_uint64(&hp, NULL, in);
eout = erts_bld_uint64(&hp, NULL, out);
@@ -5436,10 +4790,10 @@ reply_io_bytes(void *vreq)
if (req->sched_id == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
}
- if (erts_smp_atomic32_dec_read_nob(&req->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&req->refc) == 0)
erts_free(ERTS_ALC_T_IOB_REQ, req);
}
@@ -5453,7 +4807,7 @@ erts_request_io_bytes(Process *c_p)
ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
sizeof(ErtsIOBytesReq));
- hp = HAlloc(c_p, REF_THING_SIZE);
+ hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
refn = internal_ref_numbers(ref);
@@ -5462,16 +4816,14 @@ erts_request_io_bytes(Process *c_p)
req->refn[0] = refn[0];
req->refn[1] = refn[1];
req->refn[2] = refn[2];
- erts_smp_atomic32_init_nob(&req->refc,
+ erts_atomic32_init_nob(&req->refc,
(erts_aint32_t) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_io_bytes,
(void *) req);
-#endif
reply_io_bytes((void *) req);
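
The request object is broadcast to all schedulers and freed by whichever reply
runs last; its refc is initialized to erts_no_schedulers above and counted
down in reply_io_bytes. A standalone sketch of the same countdown pattern in
plain C11 (simplified types, no ERTS dependencies):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        atomic_int refc;            /* one count per expected reply */
        /* ... request payload ... */
    } req_t;

    static void reply_one(req_t *req)
    {
        /* per-scheduler work goes here */
        if (atomic_fetch_sub(&req->refc, 1) == 1)
            free(req);              /* last reply frees the request */
    }
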
@@ -5480,24 +4832,115 @@ erts_request_io_bytes(Process *c_p)
typedef struct {
- int to;
+ fmtfn_t to;
void *arg;
} prt_one_lnk_data;
static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
- erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->pid,mon->ref);
+ if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
+ erts_print(prtd->to, prtd->arg, "(%p,%T)", mon->other.ptr, mdp->ref);
+ else
+ erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->other.item, mdp->ref);
}
static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
{
prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
- erts_print(prtd->to, prtd->arg, "%T", lnk->pid);
+ erts_print(prtd->to, prtd->arg, "%T", lnk->other.item);
+}
+
+static void dump_port_state(fmtfn_t to, void *arg, erts_aint32_t state)
+{
+ erts_aint32_t rest;
+ int unknown = 0;
+ char delim = ' ';
+
+ erts_print(to, arg, "State:");
+
+ rest = state;
+ while (rest) {
+ erts_aint32_t chk = (rest ^ (rest-1)) & rest; /* lowest set bit */
+ char* s;
+
+ rest &= ~chk;
+ switch (chk) {
+ case ERTS_PORT_SFLG_CONNECTED: s = "CONNECTED"; break;
+ case ERTS_PORT_SFLG_EXITING: s = "EXITING"; break;
+ case ERTS_PORT_SFLG_DISTRIBUTION: s = "DISTR"; break;
+ case ERTS_PORT_SFLG_BINARY_IO: s = "BINARY_IO"; break;
+ case ERTS_PORT_SFLG_SOFT_EOF: s = "SOFT_EOF"; break;
+ case ERTS_PORT_SFLG_CLOSING: s = "CLOSING"; break;
+ case ERTS_PORT_SFLG_SEND_CLOSED: s = "SEND_CLOSED"; break;
+ case ERTS_PORT_SFLG_LINEBUF_IO: s = "LINEBUF_IO"; break;
+ case ERTS_PORT_SFLG_FREE: s = "FREE"; break;
+ case ERTS_PORT_SFLG_INITIALIZING: s = "INITIALIZING"; break;
+ case ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK: s = "PORT_LOCK"; break;
+ case ERTS_PORT_SFLG_INVALID: s = "INVALID"; break;
+ case ERTS_PORT_SFLG_HALT: s = "HALT"; break;
+#ifdef DEBUG
+ case ERTS_PORT_SFLG_PORT_DEBUG: s = "DEBUG"; break;
+#endif
+ default:
+ unknown = 1;
+ continue;
+ }
+ erts_print(to, arg, "%c%s", delim, s);
+ delim = '|';
+ }
+ if (unknown || !state)
+ erts_print(to, arg, "%c0x%x\n", delim, state);
+ else
+ erts_print(to, arg, "\n");
+}
+
+static void dump_port_task_flags(fmtfn_t to, void *arg, Port* p)
+{
+ erts_aint32_t flags = erts_atomic32_read_nob(&p->sched.flags);
+ erts_aint32_t unknown = 0;
+ char delim = ' ';
+
+ if (!flags)
+ return;
+
+ erts_print(to, arg, "Task Flags:");
+
+ while (flags) {
+ erts_aint32_t chk = (flags ^ (flags-1)) & flags; /* lowest set bit */
+ char* s;
+
+ flags &= ~chk;
+ switch (chk) {
+ case ERTS_PTS_FLG_IN_RUNQ: s = "IN_RUNQ"; break;
+ case ERTS_PTS_FLG_EXEC: s = "EXEC"; break;
+ case ERTS_PTS_FLG_HAVE_TASKS: s = "HAVE_TASKS"; break;
+ case ERTS_PTS_FLG_EXIT: s = "EXIT"; break;
+ case ERTS_PTS_FLG_BUSY_PORT: s = "BUSY_PORT"; break;
+ case ERTS_PTS_FLG_BUSY_PORT_Q: s = "BUSY_Q"; break;
+ case ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q: s = "CHK_UNSET_BUSY_Q"; break;
+ case ERTS_PTS_FLG_HAVE_BUSY_TASKS: s = "BUSY_TASKS"; break;
+ case ERTS_PTS_FLG_HAVE_NS_TASKS: s = "NS_TASKS"; break;
+ case ERTS_PTS_FLG_PARALLELISM: s = "PARALLELISM"; break;
+ case ERTS_PTS_FLG_FORCE_SCHED: s = "FORCE_SCHED"; break;
+ case ERTS_PTS_FLG_EXITING: s = "EXITING"; break;
+ case ERTS_PTS_FLG_EXEC_IMM: s = "EXEC_IMM"; break;
+ default:
+ unknown |= chk;
+ continue;
+ }
+ erts_print(to, arg, "%c%s", delim, s);
+ delim = '|';
+ }
+ if (unknown)
+ erts_print(to, arg, "%cUNKNOWN(0x%x)\n", delim, unknown);
+ else
+ erts_print(to, arg, "\n");
}
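
Both dump helpers peel decoded flags off one at a time with the classic
lowest-set-bit idiom, (x ^ (x-1)) & x. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned rest = 0x1a;                          /* bits 1, 3, 4 */
        while (rest) {
            unsigned chk = (rest ^ (rest - 1)) & rest; /* lowest set bit */
            rest &= ~chk;                              /* clear it */
            printf("0x%x\n", chk);                     /* 0x2, 0x8, 0x10 */
        }
        return 0;
    }
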
void
-print_port_info(Port *p, int to, void *arg)
+print_port_info(Port *p, fmtfn_t to, void *arg)
{
erts_aint32_t state = erts_atomic32_read_nob(&p->state);
@@ -5505,6 +4948,8 @@ print_port_info(Port *p, int to, void *arg)
return;
erts_print(to, arg, "=port:%T\n", p->common.id);
+ dump_port_state(to, arg, state);
+ dump_port_task_flags(to, arg, p);
erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id));
if (state & ERTS_PORT_SFLG_CONNECTED) {
erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p));
@@ -5516,17 +4961,24 @@ print_port_info(Port *p, int to, void *arg)
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Links: ");
- erts_doforall_links(ERTS_P_LINKS(p), &prt_one_lnk, &prtd);
+ erts_link_tree_foreach(ERTS_P_LINKS(p), prt_one_lnk, (void *) &prtd);
erts_print(to, arg, "\n");
}
- if (ERTS_P_MONITORS(p)) {
+ if (ERTS_P_MONITORS(p) || ERTS_P_LT_MONITORS(p)) {
prt_one_lnk_data prtd;
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Monitors: ");
- erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(p), prt_one_monitor,
+ (void *) &prtd);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p), prt_one_monitor,
+ (void *) &prtd);
erts_print(to, arg, "\n");
}
+ if (p->suspended) {
+ erts_print(to, arg, "Suspended: ");
+ erts_proclist_dump(to, arg, p->suspended);
+ }
if (p->common.u.alive.reg != NULL)
erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name);
@@ -5544,6 +4996,14 @@ print_port_info(Port *p, int to, void *arg)
} else {
erts_print(to, arg, "Port controls linked-in driver: %s\n",p->name);
}
+ erts_print(to, arg, "Input: %beu\n", p->bytes_in);
+ erts_print(to, arg, "Output: %beu\n", p->bytes_out);
+ erts_print(to, arg, "Queue: %beu\n", erts_ioq_size(&p->ioq));
+ {
+ Eterm port_data = erts_port_data_read(p);
+ if (port_data != am_undefined)
+ erts_print(to, arg, "Port Data: %T\n", port_data);
+ }
}
void
@@ -5556,14 +5016,14 @@ set_busy_port(ErlDrvPort dprt, int on)
DTRACE_CHARBUF(port_str, 16);
#endif
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
prt = erts_drvport2port(dprt);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return;
if (on) {
- flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags,
+ flags = erts_atomic32_read_bor_acqb(&prt->sched.flags,
ERTS_PTS_FLG_BUSY_PORT);
if (flags & ERTS_PTS_FLG_BUSY_PORT)
return; /* Already busy */
@@ -5579,7 +5039,7 @@ set_busy_port(ErlDrvPort dprt, int on)
}
#endif
} else {
- flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags,
+ flags = erts_atomic32_read_band_acqb(&prt->sched.flags,
~ERTS_PTS_FLG_BUSY_PORT);
if (!(flags & ERTS_PTS_FLG_BUSY_PORT))
return; /* Already non-busy */
@@ -5591,7 +5051,7 @@ set_busy_port(ErlDrvPort dprt, int on)
DTRACE1(port_not_busy, port_str);
}
#endif
- if (prt->dist_entry) {
+ if (erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY) != NULL) {
/*
* Processes suspended on distribution ports are
* normally queued on the dist entry.
@@ -5673,7 +5133,7 @@ int get_port_flags(ErlDrvPort ix)
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return 0;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
flags = 0;
if (state & ERTS_PORT_SFLG_BINARY_IO)
@@ -5689,8 +5149,8 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len)
int fpe_was_unmasked;
ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (len > (Uint) INT_MAX)
erts_exit(ERTS_ABORT_EXIT,
@@ -5719,10 +5179,10 @@ int async_ready(Port *p, void* data)
{
int need_free = 1;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (p) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->ready_async != NULL) {
ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_VM_PROBES
@@ -5791,24 +5251,17 @@ erts_stale_drv_select(Eterm port,
switch (mode) {
case ERL_DRV_READ | ERL_DRV_WRITE:
type = "Input/Output";
- goto deselect;
case ERL_DRV_WRITE:
type = "Output";
- goto deselect;
case ERL_DRV_READ:
type = "Input";
- deselect:
- if (deselect) {
- driver_select(drv_port, hndl,
- mode | ERL_DRV_USE_NO_CALLBACK,
- 0);
- }
- break;
default:
- type = "Event";
- if (deselect)
- driver_event(drv_port, hndl, NULL);
- break;
+ type = "";
+ }
+ if (deselect) {
+ driver_select(drv_port, hndl,
+ mode | ERL_DRV_USE_NO_CALLBACK,
+ 0);
}
dsbufp = erts_create_logger_dsbuf();
@@ -5907,8 +5360,8 @@ void driver_report_exit(ErlDrvPort ix, int status)
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
pid = ERTS_PORT_GET_CONNECTED(prt);
ASSERT(is_internal_pid(pid));
@@ -5928,10 +5381,9 @@ void driver_report_exit(ErlDrvPort ix, int status)
if (IS_TRACED_FL(prt, F_TRACE_SEND))
trace_port_send(prt, pid, tuple, 1);
- ERL_MESSAGE_TOKEN(mp) = am_undefined;
erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
}
@@ -6385,22 +5837,12 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
mess = make_binary(hbp);
}
else {
- ProcBin* pbp;
+ Eterm* hp;
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
- erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
- pbp = (ProcBin *) erts_produce_heap(&factory,
- PROC_BIN_SIZE, HEAP_EXTRA);
- pbp->thing_word = HEADER_PROC_BIN;
- pbp->size = size;
- pbp->next = factory.off_heap->first;
- factory.off_heap->first = (struct erl_off_heap_header*)pbp;
- pbp->val = bp;
- pbp->bytes = (byte*) bp->orig_bytes;
- pbp->flags = 0;
- OH_OVERHEAD(factory.off_heap, pbp->size / sizeof(Eterm));
- mess = make_binary(pbp);
+ hp = erts_produce_heap(&factory, PROC_BIN_SIZE, HEAP_EXTRA);
+ mess = erts_build_proc_bin(factory.off_heap, hp, bp);
}
ptr += 2;
break;
@@ -6544,8 +5986,6 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
from = prt->common.id;
}
- /* send message */
- ERL_MESSAGE_TOKEN(factory.message) = am_undefined;
erts_queue_message(rp, rp_locks, factory.message, mess, from);
}
else if (res == -2) {
@@ -6572,7 +6012,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
}
if (rp) {
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
}
@@ -6587,9 +6027,7 @@ static ERTS_INLINE int
deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
Port **trace_prt)
{
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
erts_aint32_t state;
int res = 1;
Port *prt = erts_port_lookup_raw((Eterm) port_id);
@@ -6607,24 +6045,20 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
goto done;
}
if (connected_p) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
ETHR_MEMBAR(ETHR_LoadLoad);
-#endif
*connected_p = ERTS_PORT_GET_CONNECTED(prt);
}
done:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
- ERTS_SMP_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt));
erts_thr_progress_unmanaged_continue(dhndl);
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
} else
-#endif
if (res == 1) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
*trace_prt = prt;
}
return res;
@@ -6652,13 +6086,13 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len)
erts_aint32_t state;
Port* prt;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
    /* NOTE! It is *not* safe to access 'drvport' from unmanaged threads. */
prt = erts_drvport2port_state(drvport, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1; /* invalid (dead) */
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
@@ -6695,16 +6129,14 @@ driver_send_term(ErlDrvPort drvport,
* internal data representation for ErlDrvPort.
*/
Port* prt = NULL;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
-#ifdef ERTS_SMP
+ ERTS_CHK_NO_PROC_LOCKS;
if (erts_thr_progress_is_managed_thread())
-#endif
{
erts_aint32_t state;
prt = erts_drvport2port_state(drvport, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1; /* invalid (dead) */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
}
@@ -6724,11 +6156,11 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
Port* prt = erts_drvport2port_state(ix, &state);
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
@@ -6738,8 +6170,12 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
else
erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ DistEntry* dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ Uint32 conn_id = (Uint32)(UWord) erts_prtsd_get(prt, ERTS_PRTSD_CONN_ID);
+ erts_atomic64_inc_nob(&dep->in);
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
(byte*) hbuf, hlen,
(byte*) (bin->orig_bytes+offs), len);
}
@@ -6763,12 +6199,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
Port* prt = erts_drvport2port_state(ix, &state);
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
@@ -6778,14 +6214,19 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
else
erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len));
if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
+ DistEntry *dep = (DistEntry*) erts_prtsd_get(prt, ERTS_PRTSD_DIST_ENTRY);
+ Uint32 conn_id = (Uint32)(UWord) erts_prtsd_get(prt, ERTS_PRTSD_CONN_ID);
+ erts_atomic64_inc_nob(&dep->in);
if (len == 0)
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
NULL, 0,
(byte*) hbuf, hlen);
else
return erts_net_message(prt,
- prt->dist_entry,
+ dep,
+ conn_id,
(byte*) hbuf, hlen,
(byte*) buf, len);
}
@@ -6802,7 +6243,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len)
{
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return driver_output2(ix, NULL, 0, buf, len);
}
@@ -6818,7 +6259,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
erts_aint32_t state;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
ASSERT(vec->size >= skip);
if (vec->size <= skip)
@@ -6829,7 +6270,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
@@ -6890,12 +6331,6 @@ ErlDrvSizeT driver_vec_to_buf(ErlIOVec *vec, char *buf, ErlDrvSizeT len)
return (orig_len - len);
}
-
-/*
- * - driver_alloc_binary() is thread safe (efile driver depend on it).
- * - driver_realloc_binary(), and driver_free_binary() are *not* thread safe.
- */
-
/*
* reference count on driver binaries...
*/
@@ -6904,21 +6339,21 @@ ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->intern.refc, 1);
}
ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->intern.refc, 2);
}
ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->intern.refc, 1);
}
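
Only the location of the counter changes (bp->intern.refc); the driver-facing
contract stays the same. A hedged usage sketch (the driver_* calls are the
documented API, the callbacks themselves are hypothetical):

    /* Keep a driver binary alive across an asynchronous operation. */
    static void stash_binary(ErlDrvBinary *dbin)
    {
        driver_binary_inc_refc(dbin);  /* take our own reference */
        /* ... hand dbin to async work ... */
    }

    static void unstash_binary(ErlDrvBinary *dbin)
    {
        driver_free_binary(dbin);      /* drops the reference; frees at zero */
    }
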
@@ -6934,30 +6369,18 @@ driver_alloc_binary(ErlDrvSizeT size)
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
- erts_refc_init(&bin->refc, 1);
return Binary2ErlDrvBinary(bin);
}
-/* Reallocate space hold by binary */
+/* Reallocate space held by binary */
ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
{
Binary* oldbin;
Binary* newbin;
- if (!bin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_realloc_binary(%p, %lu): "
- "called with ",
- bin, (unsigned long)size);
- if (!bin) {
- erts_dsprintf(dsbufp, "NULL pointer as first argument");
- }
- erts_send_warning_to_logger_nogl(dsbufp);
- if (!bin)
- return driver_alloc_binary(size);
- }
+ if (!bin)
+ return driver_alloc_binary(size);
oldbin = ErlDrvBinary2Binary(bin);
newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
@@ -6971,18 +6394,11 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
void driver_free_binary(ErlDrvBinary* dbin)
{
Binary *bin;
- if (!dbin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_free_binary(%p): called with "
- "NULL pointer as argument", dbin);
- erts_send_warning_to_logger_nogl(dsbufp);
+ if (!dbin)
return;
- }
bin = ErlDrvBinary2Binary(dbin);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
@@ -7068,7 +6484,6 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
-#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p) {
if (p->port_data_lock) {
@@ -7077,7 +6492,7 @@ static void driver_monitor_lock_pdl(Port *p) {
/* Now we either have the port lock or the port_data_lock */
ERTS_LC_ASSERT(!p->port_data_lock
|| erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
- ERTS_SMP_LC_ASSERT(p->port_data_lock
+ ERTS_LC_ASSERT(p->port_data_lock
|| erts_lc_is_port_locked(p));
}
@@ -7085,14 +6500,13 @@ static void driver_monitor_unlock_pdl(Port *p) {
/* We should either have the port lock or the port_data_lock */
ERTS_LC_ASSERT(!p->port_data_lock
|| erts_lc_mtx_is_locked(&(p->port_data_lock->mtx)));
- ERTS_SMP_LC_ASSERT(p->port_data_lock
+ ERTS_LC_ASSERT(p->port_data_lock
|| erts_lc_is_port_locked(p));
if (p->port_data_lock) {
driver_pdl_unlock(p->port_data_lock);
}
}
-#endif
/*
* exported driver_pdl_* functions ...
@@ -7107,7 +6521,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
+ erts_mtx_init(&pdl->mtx, "port_data_lock", pp->common.id, ERTS_LOCK_FLAGS_CATEGORY_IO);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7171,307 +6585,51 @@ driver_pdl_dec_refc(ErlDrvPDL pdl)
return refc;
}
-/* expand queue to hold n elements in tail or head */
-static int expandq(ErlIOQueue* q, int n, int tail)
-/* tail: 0 if make room in head, make room in tail otherwise */
-{
- int h_sz; /* room before header */
- int t_sz; /* room after tail */
- int q_sz; /* occupied */
- int nvsz;
- SysIOVec* niov;
- ErlDrvBinary** nbinv;
-
- h_sz = q->v_head - q->v_start;
- t_sz = q->v_end - q->v_tail;
- q_sz = q->v_tail - q->v_head;
-
- if (tail && (n <= t_sz)) /* do we need to expand tail? */
- return 0;
- else if (!tail && (n <= h_sz)) /* do we need to expand head? */
- return 0;
- else if (n > (h_sz + t_sz)) { /* need to allocate */
- /* we may get little extra but it ok */
- nvsz = (q->v_end - q->v_start) + n;
-
- niov = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(SysIOVec));
- if (!niov)
- return -1;
- nbinv = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(ErlDrvBinary**));
- if (!nbinv) {
- erts_free(ERTS_ALC_T_IOQ, (void *) niov);
- return -1;
- }
- if (tail) {
- sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
-
- sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else {
- sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- if (q->v_start != q->v_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
- q->v_start = niov;
- q->v_end = niov + nvsz;
- q->v_tail = q->v_end;
- q->v_head = q->v_tail - q_sz;
-
- sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- if (q->b_start != q->b_small)
- erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
- q->b_start = nbinv;
- q->b_end = nbinv + nvsz;
- q->b_tail = q->b_end;
- q->b_head = q->b_tail - q_sz;
- }
- }
- else if (tail) { /* move to beginning to make room in tail */
- sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_head = q->v_start;
- q->v_tail = q->v_head + q_sz;
- sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_head = q->b_start;
- q->b_tail = q->b_head + q_sz;
- }
- else { /* move to end to make room */
- sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
- q->v_tail = q->v_end;
- q->v_head = q->v_tail-q_sz;
- sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
- q->b_tail = q->b_end;
- q->b_head = q->b_tail-q_sz;
- }
-
- return 0;
-}
-
-
-
/* Put elements from vec at q tail */
int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- ASSERT(vec->size >= skip); /* debug only */
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_tail + n >= q->v_end)
- expandq(q, n, 1);
-
- /* Queue and reference all binaries (remove zero length items) */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* special case: create binary! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *q->b_tail++ = b;
- q->v_tail->iov_len = len;
- q->v_tail->iov_base = b->orig_bytes;
- q->v_tail++;
- }
- else {
- driver_binary_inc_refc(b);
- *q->b_tail++ = b;
- *q->v_tail++ = *iov;
- }
- }
- iov++;
- binv++;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_enqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
/* Put elements from vec at q head */
int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
- int n;
- size_t len;
- ErlDrvSizeT size;
- SysIOVec* iov;
- ErlDrvBinary** binv;
- ErlDrvBinary* b;
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL)
- return -1;
-
- if (vec->size <= skip)
- return 0;
- size = vec->size - skip;
-
- iov = vec->iov;
- binv = vec->binv;
- n = vec->vsize;
-
- /* we use do here to strip iov_len=0 from beginning */
- do {
- len = iov->iov_len;
- if (len <= skip) {
- skip -= len;
- iov++;
- binv++;
- n--;
- }
- else {
- iov->iov_base = ((char *)(iov->iov_base)) + skip;
- iov->iov_len -= skip;
- skip = 0;
- }
- } while(skip > 0);
-
- if (q->v_head - n < q->v_start)
- expandq(q, n, 0);
-
- /* Queue and reference all binaries (remove zero length items) */
- iov += (n-1); /* move to end */
- binv += (n-1); /* move to end */
- while(n--) {
- if ((len = iov->iov_len) > 0) {
- if ((b = *binv) == NULL) { /* special case: create binary! */
- b = driver_alloc_binary(len);
- sys_memcpy(b->orig_bytes, iov->iov_base, len);
- *--q->b_head = b;
- q->v_head--;
- q->v_head->iov_len = len;
- q->v_head->iov_base = b->orig_bytes;
- }
- else {
- driver_binary_inc_refc(b);
- *--q->b_head = b;
- *--q->v_head = *iov;
- }
- }
- iov--;
- binv--;
- }
- q->size += size; /* update total size in queue */
- return 0;
+ ASSERT(vec->size >= skip);
+ return erts_ioq_pushqv(drvport2ioq(ix), (ErtsIOVec*)vec, skip);
}
-
/*
** Remove size bytes from queue head
** Return number of bytes that remain in queue
*/
ErlDrvSizeT driver_deq(ErlDrvPort ix, ErlDrvSizeT size)
{
- ErlIOQueue* q = drvport2ioq(ix);
- ErlDrvSizeT len;
-
- if ((q == NULL) || (q->size < size))
- return -1;
- q->size -= size;
- while (size > 0) {
- ASSERT(q->v_head != q->v_tail);
-
- len = q->v_head->iov_len;
- if (len <= size) {
- size -= len;
- driver_free_binary(*q->b_head);
- *q->b_head++ = NULL;
- q->v_head++;
- }
- else {
- q->v_head->iov_base = ((char *)(q->v_head->iov_base)) + size;
- q->v_head->iov_len -= size;
- size = 0;
- }
- }
-
- /* restart pointers (optimised for enq) */
- if (q->v_head == q->v_tail) {
- q->v_head = q->v_tail = q->v_start;
- q->b_head = q->b_tail = q->b_start;
- }
- return q->size;
+ ErlPortIOQueue *q = drvport2ioq(ix);
+ if (erts_ioq_deq(q, size) == -1)
+ return -1;
+ return erts_ioq_size(q);
}
-ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
- ErlIOQueue *q = drvport2ioq(ix);
- ASSERT(ev);
-
- if (! q) {
- return (ErlDrvSizeT) -1;
- } else {
- if ((ev->vsize = q->v_tail - q->v_head) == 0) {
- ev->size = 0;
- ev->iov = NULL;
- ev->binv = NULL;
- } else {
- ev->size = q->size;
- ev->iov = q->v_head;
- ev->binv = q->b_head;
- }
- return q->size;
- }
+ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev)
+{
+ return erts_ioq_peekqv(drvport2ioq(ix), (ErtsIOVec*)ev);
}
SysIOVec* driver_peekq(ErlDrvPort ix, int* vlenp) /* length of io-vector */
{
- ErlIOQueue* q = drvport2ioq(ix);
-
- if (q == NULL) {
- *vlenp = -1;
- return NULL;
- }
- if ((*vlenp = (q->v_tail - q->v_head)) == 0)
- return NULL;
- return q->v_head;
+ return erts_ioq_peekq(drvport2ioq(ix), vlenp);
}
ErlDrvSizeT driver_sizeq(ErlDrvPort ix)
{
- ErlIOQueue* q = drvport2ioq(ix);
+ ErlPortIOQueue *q = drvport2ioq(ix);
if (q == NULL)
- return (size_t) -1;
- return q->size;
+ return (ErlDrvSizeT) -1;
+ return erts_ioq_size(q);
}
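
All of the classic queue entry points are now thin wrappers around the shared
erts_ioq implementation, so existing driver code keeps working unchanged. A
hedged sketch of the usual pattern (the outputv callback shape follows the
driver API; the state handling is hypothetical):

    /* Queue data we cannot write immediately; dequeue once written. */
    static void my_outputv(ErlDrvData handle, ErlIOVec *ev)
    {
        ErlDrvPort port = ((my_state *) handle)->port; /* hypothetical state */
        driver_enqv(port, ev, 0);       /* references binaries, no copy */
        /* ... later, after writing n bytes to the device ... */
        /* driver_deq(port, n); */
        /* if (driver_sizeq(port) == 0) the queue is drained */
    }
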
@@ -7551,7 +6709,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
@@ -7568,7 +6726,7 @@ int driver_cancel_timer(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
erts_cancel_port_timer(prt);
return 0;
}
@@ -7579,11 +6737,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t)
Port* prt = erts_drvport2port(ix);
Sint64 left;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
left = erts_read_port_timer(prt);
if (left < 0)
@@ -7598,7 +6756,7 @@ int
driver_get_now(ErlDrvNowData *now_data)
{
Uint mega,secs,micro;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (now_data == NULL) {
return -1;
@@ -7632,41 +6790,47 @@ erl_drv_convert_time_unit(ErlDrvTime val,
(int) to);
}
-static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
- RefThing *refp;
- ASSERT(is_internal_ref(ref));
- ERTS_CT_ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
- refp = ref_thing_ptr(ref);
- memset(mon,0,sizeof(ErlDrvMonitor));
- memcpy(mon,refp,sizeof(RefThing));
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ ASSERT(is_internal_ordinary_ref(ref));
+ sys_memcpy((void *) mon, (void *) internal_ref_val(ref),
+ ERTS_REF_THING_SIZE*sizeof(Uint));
}
+Eterm erts_driver_monitor_to_ref(Eterm *hp, const ErlDrvMonitor *mon)
+{
+ Eterm ref;
+ ERTS_CT_ASSERT(ERTS_REF_THING_SIZE*sizeof(Uint) <= sizeof(ErlDrvMonitor));
+ sys_memcpy((void *) hp, (void *) mon, ERTS_REF_THING_SIZE*sizeof(Uint));
+ ref = make_internal_ref(hp);
+ ASSERT(is_internal_ordinary_ref(ref));
+ return ref;
+}
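
The two helpers are exact inverses: the ERTS_REF_THING_SIZE words of an
ordinary internal ref are copied into the opaque ErlDrvMonitor and back. A
sketch of the round-trip, under the size assumption the static asserts encode
(illustration only, not standalone):

    Eterm heap[ERTS_REF_THING_SIZE];
    ErlDrvMonitor mon;

    erts_ref_to_driver_monitor(ref, &mon);               /* pack */
    Eterm ref2 = erts_driver_monitor_to_ref(heap, &mon); /* unpack */
    ASSERT(EQ(ref, ref2));                               /* same ref value */
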
static int do_driver_monitor_process(Port *prt,
- Eterm *buf,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
- Process *rp;
+ Eterm buf[ERTS_REF_THING_SIZE];
Eterm ref;
+ ErtsMonitorData *mdp;
- if (prt->drv_ptr->process_exit == NULL) {
+ if (!prt->drv_ptr->process_exit)
return -1;
- }
- rp = erts_pid2proc_opt(NULL, 0,
- (Eterm) process, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- return 1;
- }
ref = erts_make_ref_in_buffer(buf);
- erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
+ mdp = erts_monitor_create(ERTS_MON_TYPE_PORT, ref,
+ prt->common.id, process, NIL);
+
+ if (!erts_proc_sig_send_monitor(&mdp->target, process)) {
+ erts_monitor_release_both(mdp);
+ return 1;
+ }
+
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(prt), &mdp->origin);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- ref_to_driver_monitor(ref,monitor);
+ erts_ref_to_driver_monitor(ref,monitor);
return 0;
}
@@ -7679,7 +6843,7 @@ int driver_monitor_process(ErlDrvPort drvport,
{
Port *prt;
int ret;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -7689,51 +6853,26 @@ int driver_monitor_process(ErlDrvPort drvport,
/* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_monitor_process(prt,buf,process,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_monitor_process(prt,process,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static int do_driver_demonitor_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor)
{
- Process *rp;
+ Eterm heap[ERTS_REF_THING_SIZE];
Eterm ref;
ErtsMonitor *mon;
- Eterm to;
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
- if (mon == NULL) {
- return 1;
- }
- ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
- ASSERT(is_internal_pid(to));
- rp = erts_pid2proc_opt(NULL,
- 0,
- to,
- ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
- if (mon) {
- erts_destroy_monitor(mon);
- }
- if (rp) {
- ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon != NULL) {
- erts_destroy_monitor(rmon);
- }
- }
+ ref = erts_driver_monitor_to_ref(heap, monitor);
+
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(prt), ref);
+ if (!mon || !erts_monitor_is_origin(mon))
+ return 1;
+
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(prt), mon);
+ erts_proc_sig_send_demonitor(mon);
return 0;
}
@@ -7742,7 +6881,7 @@ int driver_demonitor_process(ErlDrvPort drvport,
{
Port *prt;
int ret;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -7752,34 +6891,26 @@ int driver_demonitor_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_demonitor_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_demonitor_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
- const ErlDrvMonitor *monitor)
+static ErlDrvTermData do_driver_get_monitored_process(Port *prt, const ErlDrvMonitor *monitor)
{
Eterm ref;
ErtsMonitor *mon;
- Eterm to;
+ Eterm heap[ERTS_REF_THING_SIZE];
+
+ ref = erts_driver_monitor_to_ref(heap, monitor);
- memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
- ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
- if (mon == NULL) {
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(prt), ref);
+ if (!mon || !erts_monitor_is_origin(mon))
return driver_term_nil;
- }
- ASSERT(mon->type == MON_ORIGIN);
- to = mon->pid;
- ASSERT(is_internal_pid(to));
- return (ErlDrvTermData) to;
+
+ ASSERT(is_internal_pid(mon->other.item));
+ return (ErlDrvTermData) mon->other.item;
}
@@ -7788,7 +6919,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
{
Port *prt;
ErlDrvTermData ret;
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
ErtsSchedulerData *sched = erts_get_scheduler_data();
#endif
@@ -7798,42 +6929,40 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
/* Now we should have either the port lock (if we have a scheduler) or the port data lock
(if we're a driver thread) */
- ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock));
- {
- DeclareTmpHeapNoproc(buf,REF_THING_SIZE);
- UseTmpHeapNoproc(REF_THING_SIZE);
- ret = do_driver_get_monitored_process(prt,buf,monitor);
- UnUseTmpHeapNoproc(REF_THING_SIZE);
- }
+ ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock));
+ ret = do_driver_get_monitored_process(prt,monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
return ret;
}
-
int driver_compare_monitors(const ErlDrvMonitor *monitor1,
const ErlDrvMonitor *monitor2)
{
- return memcmp(monitor1,monitor2,sizeof(ErlDrvMonitor));
+ return sys_memcmp((void *) monitor1, (void *) monitor2,
+ ERTS_REF_THING_SIZE*sizeof(Eterm));
}
-void erts_fire_port_monitor(Port *prt, Eterm ref)
+void erts_fire_port_monitor(Port *prt, ErtsMonitor *tmon)
{
- ErtsMonitor *rmon;
+ ErtsMonitorData *mdp;
void (*callback)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
ErlDrvMonitor drv_monitor;
int fpe_was_unmasked;
ERTS_MSACC_PUSH_STATE_M();
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT(prt->drv_ptr != NULL);
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT(prt->drv_ptr != NULL);
+ ASSERT(erts_monitor_is_target(tmon));
+ mdp = erts_monitor_to_data(tmon);
DRV_MONITOR_LOCK_PDL(prt);
- if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) {
+ if (!erts_monitor_is_in_table(&mdp->origin)) {
DRV_MONITOR_UNLOCK_PDL(prt);
+ erts_monitor_release(tmon);
return;
}
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
- ref_to_driver_monitor(ref,&drv_monitor);
+ erts_ref_to_driver_monitor(mdp->ref,&drv_monitor);
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
@@ -7857,11 +6986,9 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
DRV_MONITOR_LOCK_PDL(prt);
ERTS_MSACC_POP_STATE_M();
/* remove monitor *after* callback */
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(prt), &mdp->origin);
DRV_MONITOR_UNLOCK_PDL(prt);
- if (rmon) {
- erts_destroy_monitor(rmon);
- }
+ erts_monitor_release_both(mdp);
}
@@ -7871,11 +6998,11 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
erts_aint32_t state;
Port* prt = erts_drvport2port_state(ix, &state);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (prt->async_open_port)
init_ack_send_reply(prt, prt->common.id);
@@ -7906,34 +7033,19 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
int driver_exit(ErlDrvPort ix, int err)
{
Port* prt = erts_drvport2port(ix);
- Process* rp;
- ErtsLink *lnk, *rlnk = NULL;
+ ErtsLink *lnk;
Eterm connected;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
connected = ERTS_PORT_GET_CONNECTED(prt);
- rp = erts_pid2proc(NULL, 0, connected, ERTS_PROC_LOCK_LINK);
- if (rp) {
- rlnk = erts_remove_link(&ERTS_P_LINKS(rp),prt->common.id);
- }
-
- lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected);
-
-#ifdef ERTS_SMP
- if (rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#endif
-
- if (rlnk != NULL) {
- erts_destroy_link(rlnk);
- }
-
- if (lnk != NULL) {
- erts_destroy_link(lnk);
+ lnk = erts_link_tree_lookup(ERTS_P_LINKS(prt), connected);
+ if (lnk) {
+ erts_link_tree_delete(&ERTS_P_LINKS(prt), lnk);
+ erts_proc_sig_send_unlink(NULL, lnk);
}
if (err == 0)
@@ -7956,7 +7068,7 @@ int driver_failure_atom(ErlDrvPort ix, char* string)
{
return driver_failure_term(ix,
erts_atom_put((byte *) string,
- strlen(string),
+ sys_strlen(string),
ERTS_ATOM_ENC_LATIN1,
1),
0);
@@ -7980,7 +7092,7 @@ ErlDrvTermData driver_mk_atom(char* string)
sys_strlen(string),
ERTS_ATOM_ENC_LATIN1,
1);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
return (ErlDrvTermData) am;
}
@@ -7989,27 +7101,27 @@ ErlDrvTermData driver_mk_port(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return (ErlDrvTermData) NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return (ErlDrvTermData) prt->common.id;
}
ErlDrvTermData driver_connected(ErlDrvPort ix)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return ERTS_PORT_GET_CONNECTED(prt);
}
ErlDrvTermData driver_caller(ErlDrvPort ix)
{
Port* prt = erts_drvport2port(ix);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return NIL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
return prt->caller;
}
@@ -8018,20 +7130,20 @@ int driver_lock_driver(ErlDrvPort ix)
Port* prt = erts_drvport2port(ix);
DE_Handle* dh;
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
erts_ddll_lock_driver(dh, prt->drv_ptr->name);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
return 0;
}
@@ -8039,9 +7151,9 @@ int driver_lock_driver(ErlDrvPort ix)
static int maybe_lock_driver_list(void)
{
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
if (rec_lock == 0) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
return 1;
}
return 0;
@@ -8049,7 +7161,7 @@ static int maybe_lock_driver_list(void)
static void maybe_unlock_driver_list(int doit)
{
if (doit) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
}
/*
@@ -8072,7 +7184,7 @@ void *driver_dl_open(char * path)
{
void *ptr;
int res;
- int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
+ int *last_error_p = erts_tsd_get(driver_list_last_error_key);
int locked = maybe_lock_driver_list();
if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) {
maybe_unlock_driver_list(locked);
@@ -8080,7 +7192,7 @@ void *driver_dl_open(char * path)
} else {
if (!last_error_p) {
last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
- erts_smp_tsd_set(driver_list_last_error_key,last_error_p);
+ erts_tsd_set(driver_list_last_error_key,last_error_p);
}
*last_error_p = res;
maybe_unlock_driver_list(locked);
@@ -8092,7 +7204,7 @@ void *driver_dl_sym(void * handle, char *func_name)
{
void *ptr;
int res;
- int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key);
+ int *last_error_p = erts_tsd_get(driver_list_lock_status_key);
int locked = maybe_lock_driver_list();
if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) {
maybe_unlock_driver_list(locked);
@@ -8100,7 +7212,7 @@ void *driver_dl_sym(void * handle, char *func_name)
} else {
if (!last_error_p) {
last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
- erts_smp_tsd_set(driver_list_lock_status_key,last_error_p);
+ erts_tsd_set(driver_list_lock_status_key,last_error_p);
}
*last_error_p = res;
maybe_unlock_driver_list(locked);
@@ -8120,7 +7232,7 @@ int driver_dl_close(void *handle)
char *driver_dl_error(void)
{
char *res;
- int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key);
+ int *last_error_p = erts_tsd_get(driver_list_lock_status_key);
int locked = maybe_lock_driver_list();
res = erts_ddll_error((last_error_p != NULL) ? (*last_error_p) : ERL_DE_ERROR_UNSPECIFIED);
maybe_unlock_driver_list(locked);
@@ -8158,20 +7270,8 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->driver_minor_version = ERL_DRV_EXTENDED_MINOR_VERSION;
sip->erts_version = ERLANG_VERSION;
sip->otp_release = ERLANG_OTP_RELEASE;
- sip->thread_support =
-#ifdef USE_THREADS
- 1
-#else
- 0
-#endif
- ;
- sip->smp_support =
-#ifdef ERTS_SMP
- 1
-#else
- 0
-#endif
- ;
+ sip->thread_support = 1;
+ sip->smp_support = 1;
}
@@ -8197,11 +7297,7 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
*/
if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
sip->dirty_scheduler_support =
-#ifdef ERTS_DIRTY_SCHEDULERS
1
-#else
- 0
-#endif
;
}
@@ -8228,14 +7324,6 @@ no_output_callback(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
}
static void
-no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_data)
-{
- Port *prt = get_current_port();
- report_missing_drv_callback(prt, "Event", "event()");
- driver_event(ERTS_Port2ErlDrvPort(prt), event, NULL);
-}
-
-static void
no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
@@ -8269,38 +7357,31 @@ no_stop_select_callback(ErlDrvEvent event, void* private)
}
#define IS_DRIVER_VERSION_GE(DE,MAJOR,MINOR) \
- ((DE)->major_version >= (MAJOR) && (DE)->minor_version >= (MINOR))
+ ((DE)->major_version > (MAJOR) || \
+ ((DE)->major_version == (MAJOR) && (DE)->minor_version >= (MINOR)))
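
The rewritten macro is a proper lexicographic comparison; the old conjunction
wrongly rejected, for example, driver version 3.0 against a required 2.1
(3 >= 2 holds but 0 >= 1 does not). A standalone check of the new form:

    #include <assert.h>

    #define VERSION_GE(maj, min, MAJ, MIN) \
        ((maj) > (MAJ) || ((maj) == (MAJ) && (min) >= (MIN)))

    int main(void)
    {
        assert(VERSION_GE(3, 0, 2, 1));   /* 3.0 >= 2.1: old form said no */
        assert(!VERSION_GE(2, 0, 2, 1));  /* 2.0 <  2.1 */
        return 0;
    }
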
static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
{
+ drv->name_atom = erts_atom_put((byte*)de->driver_name,
+ sys_strlen(de->driver_name),
+ ERTS_ATOM_ENC_LATIN1, 1);
drv->name = de->driver_name;
+
ASSERT(de->extended_marker == ERL_DRV_EXTENDED_MARKER);
ASSERT(de->major_version >= 2);
drv->version.major = de->major_version;
drv->version.minor = de->minor_version;
drv->flags = de->driver_flags;
drv->handle = handle;
-#ifdef ERTS_SMP
- if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
- drv->lock = NULL;
- else {
- drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_mtx_t));
- erts_mtx_init_x(drv->lock,
- "driver_lock",
-#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1),
-#else
- NIL,
-#endif
- 1
- );
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
+ drv->lock = NULL;
+ } else {
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
+
+ erts_mtx_init(drv->lock, "driver_lock", drv->name_atom,
+ ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
drv->entry = de;
drv->start = de->start;
@@ -8311,7 +7392,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->outputv = de->outputv;
drv->control = de->control;
drv->call = de->call;
- drv->event = de->event ? de->event : no_event_callback;
drv->ready_input = de->ready_input ? de->ready_input : no_ready_input_callback;
drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback;
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
@@ -8343,12 +7423,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
void
erts_destroy_driver(erts_driver_t *drv)
{
-#ifdef ERTS_SMP
if (drv->lock) {
- erts_smp_mtx_destroy(drv->lock);
+ erts_mtx_destroy(drv->lock);
erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock);
}
-#endif
erts_free(ERTS_ALC_T_DRIVER, drv);
}
@@ -8359,21 +7437,22 @@ erts_destroy_driver(erts_driver_t *drv)
void add_driver_entry(ErlDrvEntry *drv){
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
/*
* Ignore the result of erts_add_driver_entry; the init is not
* allowed to fail when drivers are added by drivers.
*/
- erts_add_driver_entry(drv, NULL, rec_lock != NULL);
+ erts_add_driver_entry(drv, NULL, rec_lock != NULL, 0);
}
-int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_locked)
+int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle,
+ int driver_list_locked, int taint)
{
erts_driver_t *dp = erts_alloc(ERTS_ALC_T_DRIVER, sizeof(erts_driver_t));
- int res;
+ int err = 0;
if (!driver_list_locked) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
}
dp->next = driver_list;
@@ -8384,12 +7463,18 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
driver_list = dp;
if (!driver_list_locked) {
- erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
+ erts_tsd_set(driver_list_lock_status_key, (void *) 1);
}
- res = init_driver(dp, de, handle);
+ if (!err) {
+ err = init_driver(dp, de, handle);
- if (res != 0) {
+ if (taint) {
+ erts_add_taint(dp->name_atom);
+ }
+ }
+
+ if (err) {
/*
* Remove it all again...
*/
@@ -8401,10 +7486,10 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
}
if (!driver_list_locked) {
- erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_tsd_set(driver_list_lock_status_key, NULL);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
- return res;
+ return err;
}
/* Not allowed for dynamic drivers */
@@ -8413,9 +7498,9 @@ int remove_driver_entry(ErlDrvEntry *drv)
erts_driver_t *dp;
void *rec_lock;
- rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ rec_lock = erts_tsd_get(driver_list_lock_status_key);
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
+ erts_rwmtx_rwlock(&erts_driver_list_lock);
}
dp = driver_list;
while (dp && dp->entry != drv)
@@ -8423,7 +7508,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
if (dp) {
if (dp->handle) {
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return -1;
}
@@ -8437,12 +7522,12 @@ int remove_driver_entry(ErlDrvEntry *drv)
}
erts_destroy_driver(dp);
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 1;
}
if (rec_lock == NULL) {
- erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
+ erts_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 0;
}
@@ -8458,13 +7543,27 @@ int null_func(void)
int
erl_drv_putenv(const char *key, char *value)
{
- return erts_sys_putenv_raw((char*)key, value);
+ switch (erts_sys_explicit_8bit_putenv((char*)key, value)) {
+ case -1: /* Insufficient buffer space */
+ return 1;
+ case 1: /* Success */
+ return 0;
+ default: /* Not found */
+ return -1;
+ }
}
int
erl_drv_getenv(const char *key, char *value, size_t *value_size)
{
- return erts_sys_getenv_raw((char*)key, value, value_size);
+ switch (erts_sys_explicit_8bit_getenv((char*)key, value, value_size)) {
+ case -1: /* Insufficient buffer space */
+ return 1;
+ case 1: /* Success */
+ return 0;
+ default: /* Not found */
+ return -1;
+ }
}
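
Both wrappers fold the new erts_sys_explicit_8bit_* result codes back into the
documented erl_driver return convention (0 on success, 1 when the buffer is
too small, negative when the variable is unset). A hedged usage sketch of the
grow-and-retry loop this enables (helper name hypothetical):

    /* Fetch an environment variable into a driver-allocated buffer. */
    static char *getenv_dup(const char *key)
    {
        size_t sz = 64;
        char *buf = driver_alloc(sz);
        while (buf) {
            int res = erl_drv_getenv(key, buf, &sz);
            if (res == 0)
                return buf;                /* success */
            if (res < 0) {
                driver_free(buf);          /* variable not set */
                return NULL;
            }
            buf = driver_realloc(buf, sz); /* too small: sz now holds need */
        }
        return NULL;
    }
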
/* get heart_port
diff --git a/erts/emulator/beam/lttng-wrapper.h b/erts/emulator/beam/lttng-wrapper.h
index 0bc75c1552..ad4852374a 100644
--- a/erts/emulator/beam/lttng-wrapper.h
+++ b/erts/emulator/beam/lttng-wrapper.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -61,13 +61,13 @@
#define lttng_proc_to_mfa_str(p, Name) \
do { \
if (ERTS_PROC_IS_EXITING((p))) { \
- strcpy(Name, "<exiting>"); \
+ sys_strcpy(Name, "<exiting>"); \
} else { \
BeamInstr *_fptr = find_function_from_pc((p)->i); \
if (_fptr) { \
lttng_mfa_to_str(_fptr[0],_fptr[1],_fptr[2], Name); \
} else { \
- strcpy(Name, "<unknown>"); \
+ sys_strcpy(Name, "<unknown>"); \
} \
} \
} while(0)
diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab
new file mode 100644
index 0000000000..494fe8961e
--- /dev/null
+++ b/erts/emulator/beam/macros.tab
@@ -0,0 +1,173 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+//
+// Define a regular expression that will match instructions that
+// perform GC. That will allow beam_makeops to check for instructions
+// that don't use $REFRESH_GEN_DEST() when they should.
+//
+
+GC_REGEXP=erts_garbage_collect|erts_gc|GcBifFunction;
+
+// $Offset is relative to the start of the instruction (not to the
+// location of the failure label reference). Since combined
+// instructions may increment the instruction pointer (e.g. in
+// 'increment') for some of the instructions in the group, we actually
+// use a virtual start position common to all instructions in the
+// group. To calculate the correct virtual position, we will need to
+// add $IP_ADJUSTMENT to the offset. ($IP_ADJUSTMENT will usually be
+// zero, except in a few bit syntax instructions.)
+
+SET_I_REL(Offset) {
+ ASSERT(VALID_INSTR(*(I + ($Offset) + $IP_ADJUSTMENT)));
+ I += $Offset + $IP_ADJUSTMENT;
+}
+
+SET_CP_I_ABS(Target) {
+ c_p->i = $Target;
+ ASSERT(VALID_INSTR(*c_p->i));
+}
+
+SET_REL_I(Dst, Offset) {
+ $Dst = I + ($Offset);
+ ASSERT(VALID_INSTR(*$Dst));
+}
+
+FAIL(Fail) {
+ //| -no_prefetch
+ $SET_I_REL($Fail);
+ Goto(*I);
+}
+
+JUMP(Fail) {
+ //| -no_next
+ $SET_I_REL($Fail);
+ Goto(*I);
+}
+
+GC_TEST(Ns, Nh, Live) {
+ Uint need = $Nh + $Ns;
+ if (ERTS_UNLIKELY(E - HTOP < need)) {
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED($Nh);
+}
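
The guard works because each process's heap and stack share one memory block:
the heap grows up from HTOP while the stack grows down from E, so E - HTOP is
exactly the free space between them. A sketch of the layout the test checks:

    low addr [ heap ... HTOP ->    free    <- E ... stack ] high addr

    free words = E - HTOP; a GC is forced when free < Ns + Nh.
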
+
+GC_TEST_PRESERVE(NeedHeap, Live, PreserveTerm) {
+ Uint need = $NeedHeap;
+ if (ERTS_UNLIKELY(E - HTOP < need)) {
+ SWAPOUT;
+ reg[$Live] = $PreserveTerm;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live+1, FCALLS);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ $PreserveTerm = reg[$Live];
+ SWAPIN;
+ }
+ HEAP_SPACE_VERIFIED($NeedHeap);
+}
+
+
+// Make sure that there are NeedStack + NeedHeap + 1 words available
+// on the combined heap/stack segment, then allocate NeedStack + 1
+// words on the stack and save CP.
+AH(NeedStack, NeedHeap, Live) {
+ unsigned needed = $NeedStack + 1;
+ $GC_TEST(needed, $NeedHeap, $Live);
+ E -= needed;
+ *E = make_cp(c_p->cp);
+ c_p->cp = 0;
+}
+
+NEXT0() {
+ //| -no_next
+ SET_I((BeamInstr *) $NEXT_INSTRUCTION);
+ Goto(*I);
+}
+
+NEXT(Addr) {
+ //| -no_next
+ SET_I((BeamInstr *) $Addr);
+ Goto(*I);
+}
+
+FAIL_BODY() {
+ //| -no_prefetch
+ goto find_func_info;
+}
+
+FAIL_HEAD_OR_BODY(Fail) {
+ //| -no_prefetch
+
+ /*
+ * In a correctly working program, we expect failures in
+ * guards to be more likely than failures in bodies.
+ */
+
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ goto find_func_info;
+}
+
+BADARG(Fail) {
+ c_p->freason = BADARG;
+ $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BADARITH0() {
+ c_p->freason = BADARITH;
+ goto find_func_info;
+}
+
+SYSTEM_LIMIT(Fail) {
+ c_p->freason = SYSTEM_LIMIT;
+ $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BIF_ERROR_ARITY_1(Fail, BIF, Op1) {
+ //| -no_prefetch
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ reg[0] = $Op1;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+ goto post_error_handling;
+}
+
+BIF_ERROR_ARITY_2(Fail, BIF, Op1, Op2) {
+ //| -no_prefetch
+ if (ERTS_LIKELY($Fail)) {
+ $FAIL($Fail);
+ }
+ reg[0] = $Op1;
+ reg[1] = $Op2;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+ goto post_error_handling;
+}
diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab
new file mode 100644
index 0000000000..c594a87298
--- /dev/null
+++ b/erts/emulator/beam/map_instrs.tab
@@ -0,0 +1,159 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+ensure_map(Map) {
+ if (is_not_map($Map)) {
+ c_p->freason = BADMAP;
+ c_p->fvalue = $Map;
+ $FAIL_BODY();
+ }
+}
+
+new_map(Dst, Live, N) {
+ Eterm res;
+
+ HEAVY_SWAPOUT;
+ res = erts_gc_new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+}
+
+i_new_small_map_lit(Dst, Live, Keys) {
+ Eterm res;
+ Uint n;
+ Eterm keys = $Keys;
+
+ HEAVY_SWAPOUT;
+ res = erts_gc_new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ n = arityval(*tuple_val(keys));
+ $NEXT($NEXT_INSTRUCTION+n);
+}
+
+i_get_map_element(Fail, Src, Key, Dst) {
+ Eterm res = get_map_element($Src, $Key);
+ if (is_non_value(res)) {
+ $FAIL($Fail);
+ }
+ $Dst = res;
+}
+
+i_get_map_element_hash(Fail, Src, Key, Hx, Dst) {
+ Eterm res = get_map_element_hash($Src, $Key, $Hx);
+ if (is_non_value(res)) {
+ $FAIL($Fail);
+ }
+ $Dst = res;
+}
+
+i_get_map_elements(Fail, Src, N) {
+ Eterm map;
+ BeamInstr *fs;
+ Uint sz, n;
+
+ map = $Src;
+
+ /* This instruction assumes Arg1 is a map,
+ * i.e. that it follows an is_map test if needed.
+ */
+
+ n = (Uint)$N / 3;
+ fs = $NEXT_INSTRUCTION;
+
+ if (is_flatmap(map)) {
+ flatmap_t *mp;
+ Eterm *ks;
+ Eterm *vs;
+
+ mp = (flatmap_t *)flatmap_val(map);
+ sz = flatmap_get_size(mp);
+
+ if (sz == 0) {
+ $FAIL($Fail);
+ }
+
+ ks = flatmap_get_keys(mp);
+ vs = flatmap_get_values(mp);
+
+ while(sz) {
+ if (EQ((Eterm) fs[0], *ks)) {
+ PUT_TERM_REG(*vs, fs[1]);
+ n--;
+ fs += 3;
+ /* no more values to fetch, we are done */
+ if (n == 0) {
+ $NEXT(fs);
+ }
+ }
+ ks++, sz--, vs++;
+ }
+ $FAIL($Fail);
+ } else {
+ const Eterm *v;
+ Uint32 hx;
+ ASSERT(is_hashmap(map));
+ while(n--) {
+ hx = fs[2];
+ ASSERT(hx == hashmap_make_hash((Eterm)fs[0]));
+ if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) {
+ $FAIL($Fail);
+ }
+ PUT_TERM_REG(*v, fs[1]);
+ fs += 3;
+ }
+ $NEXT(fs);
+ }
+}
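
For clarity, this is the operand layout the instruction assumes, shown as a sketch with toy types rather than the real BeamInstr/PUT_TERM_REG machinery: $N counts words, three per requested element, stored in the instruction stream as (key, destination register, precomputed hash) triples, where the hash word is only consulted on the hashmap path:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Word;

    struct map_fetch { Word key; Word dst_reg; uint32_t hash; };

    static size_t decode_triples(const Word *fs, Word n_words,
                                 struct map_fetch *out)
    {
        size_t n = (size_t) n_words / 3;       /* one triple per element */
        for (size_t i = 0; i < n; i++, fs += 3) {
            out[i].key     = fs[0];            /* key to look up           */
            out[i].dst_reg = fs[1];            /* packed destination reg   */
            out[i].hash    = (uint32_t) fs[2]; /* used on the hashmap path */
        }
        return n;
    }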
+
+update_map_assoc(Src, Dst, Live, N) {
+ Eterm res;
+ Uint live = $Live;
+
+ reg[live] = $Src;
+ HEAVY_SWAPOUT;
+ res = erts_gc_update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ ASSERT(is_value(res));
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+}
+
+update_map_exact(Fail, Src, Dst, Live, N) {
+ Eterm res;
+ Uint live = $Live;
+
+ reg[live] = $Src;
+ HEAVY_SWAPOUT;
+ res = erts_gc_update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION);
+ HEAVY_SWAPIN;
+ if (is_value(res)) {
+ $REFRESH_GEN_DEST();
+ $Dst = res;
+ $NEXT($NEXT_INSTRUCTION+$N);
+ } else {
+ $FAIL_HEAD_OR_BODY($Fail);
+ }
+}
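
The asymmetry between the two instructions mirrors the map-update operators: an assoc update (Map#{K => V}) inserts or replaces and cannot fail on a map, hence the plain ASSERT, while an exact update (Map#{K := V}) requires the key to exist and routes a miss through $FAIL_HEAD_OR_BODY. A toy model of the exact-update contract, using an invented array-backed map rather than the real erts structures:

    #include <stddef.h>

    typedef struct { int key, val; } ToyEntry;
    typedef struct { ToyEntry *entries; size_t n; } ToyMap;

    /* Exact update: only overwrite an existing key. Returns 0 on a
     * missing key, which the caller turns into a badkey error, just as
     * update_map_exact does with a non-value result. */
    static int toy_update_exact(ToyMap *m, int key, int val)
    {
        for (size_t i = 0; i < m->n; i++) {
            if (m->entries[i].key == key) {
                m->entries[i].val = val;
                return 1;
            }
        }
        return 0;
    }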
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 4f36377450..0642a06123 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@
#include "erl_vm.h"
#include "global.h"
#include "module.h"
+#include "beam_catches.h"
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -38,9 +39,9 @@
static IndexTable module_tables[ERTS_NUM_CODE_IX];
-erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
-static erts_smp_atomic_t tot_module_bytes;
+static erts_atomic_t tot_module_bytes;
/* SMP note: Active module table lookup and current module instance can be
* read without any locks. Old module instances are protected by
@@ -48,9 +49,7 @@ static erts_smp_atomic_t tot_module_bytes;
* Staging table is protected by the "code_ix lock".
*/
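
The note refers to the generic code-index staging scheme: executing code follows the active index without taking locks, while loaders prepare the staging index and publish it atomically. A rough stand-alone sketch of the publication step (hypothetical; the real rotation and locking live in code_ix.c):

    #include <stdatomic.h>

    #define NUM_CODE_IX 3  /* mirrors ERTS_NUM_CODE_IX */

    static _Atomic unsigned active_ix;  /* read lock-free by executing code   */
    static unsigned staging_ix = 1;     /* mutated only under the code_ix lock */

    static void commit_staging(void)
    {
        /* Publish the staged tables; readers pick up the new index. */
        atomic_store_explicit(&active_ix, staging_ix, memory_order_release);
        staging_ix = (staging_ix + 1) % NUM_CODE_IX;
    }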
-#include "erl_smp.h"
-
-void module_info(int to, void *to_arg)
+void module_info(fmtfn_t to, void *to_arg)
{
index_info(to, to_arg, &module_tables[erts_active_code_ix()]);
}
@@ -67,32 +66,37 @@ static int module_cmp(Module* tmpl, Module* obj)
return tmpl->module != obj->module;
}
+void erts_module_instance_init(struct erl_module_instance* modi)
+{
+ modi->code_hdr = 0;
+ modi->code_length = 0;
+ modi->catches = BEAM_CATCHES_NIL;
+ modi->nif = NULL;
+ modi->num_breakpoints = 0;
+ modi->num_traced_exports = 0;
+#ifdef HIPE
+ modi->hipe_code = NULL;
+#endif
+}
static Module* module_alloc(Module* tmpl)
{
Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module));
- erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
+ erts_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->curr.code_hdr = 0;
- obj->old.code_hdr = 0;
- obj->curr.code_length = 0;
- obj->old.code_length = 0;
obj->slot.index = -1;
- obj->curr.nif = NULL;
- obj->old.nif = NULL;
- obj->curr.num_breakpoints = 0;
- obj->old.num_breakpoints = 0;
- obj->curr.num_traced_exports = 0;
- obj->old.num_traced_exports = 0;
+ erts_module_instance_init(&obj->curr);
+ erts_module_instance_init(&obj->old);
obj->on_load = 0;
+ DBG_TRACE_MFA(make_atom(obj->module), 0, 0, "module_alloc");
return obj;
}
static void module_free(Module* mod)
{
erts_free(ERTS_ALC_T_MODULE, mod);
- erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
+ erts_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
}
void init_module_table(void)
@@ -114,11 +118,13 @@ void init_module_table(void)
}
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ erts_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
- erts_smp_atomic_init_nob(&tot_module_bytes, 0);
+ erts_atomic_init_nob(&tot_module_bytes, 0);
}
+
Module*
erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
@@ -139,27 +145,31 @@ erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
}
}
-Module*
-erts_put_module(Eterm mod)
+
+static Module* put_module(Eterm mod, IndexTable* mod_tab)
{
Module e;
- IndexTable* mod_tab;
int oldsz, newsz;
Module* res;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_has_code_write_permission());
-
- mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
oldsz = index_table_sz(mod_tab);
res = (Module*) index_put_entry(mod_tab, (void*) &e);
newsz = index_table_sz(mod_tab);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
return res;
}
+Module*
+erts_put_module(Eterm mod)
+{
+ ERTS_LC_ASSERT(erts_initialized == 0
+ || erts_has_code_write_permission());
+
+ return put_module(mod, &module_tables[erts_staging_code_ix()]);
+}
+
Module *module_code(int i, ErtsCodeIndex code_ix)
{
return (Module*) erts_index_lookup(&module_tables[code_ix], i);
@@ -172,7 +182,7 @@ int module_code_size(ErtsCodeIndex code_ix)
int module_table_sz(void)
{
- return erts_smp_atomic_read_nob(&tot_module_bytes);
+ return erts_atomic_read_nob(&tot_module_bytes);
}
#ifdef DEBUG
@@ -181,6 +191,13 @@ static ErtsCodeIndex dbg_load_code_ix = 0;
static int entries_at_start_staging = 0;
+static ERTS_INLINE void copy_module(Module* dst_mod, Module* src_mod)
+{
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ dst_mod->on_load = src_mod->on_load;
+}
+
void module_start_staging(void)
{
IndexTable* src = &module_tables[erts_active_code_ix()];
@@ -199,10 +216,7 @@ void module_start_staging(void)
src_mod = (Module*) erts_index_lookup(src, i);
dst_mod = (Module*) erts_index_lookup(dst, i);
ASSERT(src_mod->module == dst_mod->module);
-
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
/*
@@ -214,12 +228,10 @@ void module_start_staging(void)
dst_mod = (Module*) index_put_entry(dst, src_mod);
ASSERT(dst_mod != src_mod);
- dst_mod->curr = src_mod->curr;
- dst_mod->old = src_mod->old;
- dst_mod->on_load = src_mod->on_load;
+ copy_module(dst_mod, src_mod);
}
newsz = index_table_sz(dst);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
entries_at_start_staging = dst->entries;
IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix());
@@ -237,9 +249,8 @@ void module_end_staging(int commit)
oldsz = index_table_sz(tab);
index_erase_latest_from(tab, entries_at_start_staging);
newsz = index_table_sz(tab);
- erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
}
IF_DEBUG(dbg_load_code_ix = -1);
}
-
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index 1c1afc8461..00efd129ff 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,10 @@
#include "index.h"
+#ifdef HIPE
+#include "hipe_module.h"
+#endif
+
struct erl_module_instance {
BeamCodeHeader* code_hdr;
int code_length; /* Length of loaded code in bytes. */
@@ -30,6 +34,9 @@ struct erl_module_instance {
struct erl_module_nif* nif;
int num_breakpoints;
int num_traced_exports;
+#ifdef HIPE
+ HipeModule *hipe_code;
+#endif
};
typedef struct erl_module {
@@ -38,17 +45,18 @@ typedef struct erl_module {
int seen; /* Used by finish_loading() */
struct erl_module_instance curr;
- struct erl_module_instance old; /* protected by "old_code" rwlock */
+ struct erl_module_instance old; /* active protected by "old_code" rwlock */
struct erl_module_instance* on_load;
} Module;
+void erts_module_instance_init(struct erl_module_instance* modi);
Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
void init_module_table(void);
void module_start_staging(void);
void module_end_staging(int commit);
-void module_info(int, void *);
+void module_info(fmtfn_t, void *);
Module *module_code(int, ErtsCodeIndex);
int module_code_size(ErtsCodeIndex);
@@ -64,29 +72,29 @@ int erts_is_old_code_rlocked(ErtsCodeIndex);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+extern erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
}
ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix)
{
- erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
+ erts_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
}
#ifdef ERTS_ENABLE_LOCK_CHECK
ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix)
{
- return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
+ return erts_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
}
#endif
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
new file mode 100644
index 0000000000..9bf3aefaca
--- /dev/null
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -0,0 +1,403 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017-2018. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// /*
+// * Skeleton for receive statement:
+// *
+// * recv_mark L1 Optional
+// * call make_ref/monitor Optional
+// * ...
+// * recv_set L1 Optional
+// * L1: <-------------------+
+// * <-----------+ |
+// * | |
+// * loop_rec L2 ------+---+ |
+// * ... | | |
+// * remove_message | | |
+// * jump L3 | | |
+// * ... | | |
+// * loop_rec_end L1 --+ | |
+// * L2: <---------------+ |
+// * wait L1 -------------------+ or wait_timeout
+// * timeout
+// *
+// * L3: Code after receive...
+// *
+// */
+
+i_recv_mark() {
+ /*
+     * Save the current end of the message queue
+ */
+ ERTS_RECV_MARK_SAVE(c_p);
+}
+
+i_recv_set() {
+ /*
+     * If a recv mark was previously saved, set the peek position to it
+ */
+ ERTS_RECV_MARK_SET(c_p);
+ SET_I($NEXT_INSTRUCTION);
+ goto loop_rec_top__;
+ //| -no_next
+}
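
The mark/set pair implements the selective-receive optimization: a message that was already in the queue before a fresh reference was created cannot contain that reference, so matching may start at the queue position saved when the reference was made. A toy queue model of the idea (hypothetical types; the real work is done by ERTS_RECV_MARK_SAVE and ERTS_RECV_MARK_SET):

    struct toy_msg   { struct toy_msg *next; };
    struct toy_queue {
        struct toy_msg *first, *last;  /* the message queue          */
        struct toy_msg *peek;          /* where matching starts      */
        struct toy_msg *mark;          /* saved end-of-queue, if any */
    };

    static void toy_recv_mark(struct toy_queue *q) { q->mark = q->last; }

    static void toy_recv_set(struct toy_queue *q)
    {
        /* Skip every message that predates the mark; none of them can
         * contain the reference created right after the mark. */
        q->peek = q->mark ? q->mark->next : q->first;
    }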
+
+i_loop_rec(Dest) {
+ //| -no_prefetch
+
+ /*
+ * Pick up the next message and place it in x(0).
+ * If no message, jump to a wait or wait_timeout instruction.
+ */
+
+ ErtsMessage* msgp;
+
+ /* Entry point from recv_set */
+ loop_rec_top__:
+
+ /*
+     * We need to disable GC while matching messages
+     * in the queue. This is because messages with data
+     * outside the heap would be corrupted by a GC.
+ */
+ ASSERT(!(c_p->flags & F_DELAY_GC));
+ c_p->flags |= F_DELAY_GC;
+
+ /* Entry point from loop_rec_end (and locally) */
+ loop_rec__:
+
+ if (FCALLS <= 0 && FCALLS <= neg_o_reds) {
+ $SET_CP_I_ABS(I);
+ c_p->flags &= ~F_DELAY_GC;
+ SWAPOUT;
+ c_p->arity = 0;
+ c_p->current = NULL;
+ goto do_schedule;
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (ERTS_UNLIKELY(msgp == NULL)) {
+ int get_out;
+ SWAPOUT;
+ $SET_CP_I_ABS(I);
+ c_p->arity = 0;
+ c_p->current = NULL;
+ FCALLS -= erts_proc_sig_receive_helper(c_p, FCALLS, neg_o_reds,
+ &msgp, &get_out);
+ SWAPIN;
+ if (ERTS_UNLIKELY(msgp == NULL)) {
+ if (get_out) {
+ if (get_out < 0) {
+ ASSERT(FCALLS <= 0 && FCALLS <= neg_o_reds);
+ goto loop_rec__; /* yield */
+ }
+ else {
+ ASSERT(ERTS_PROC_IS_EXITING(c_p));
+ goto do_schedule; /* exit */
+ }
+ }
+
+ /*
+             * If there are no more messages in the queue
+             * (and we are neither yielding nor exiting),
+             * erts_proc_sig_receive_helper() returns with
+             * the message queue lock held...
+ */
+ c_p->flags &= ~F_DELAY_GC;
+ $SET_I_REL($Dest);
+ Goto(*I); /* Jump to a wait or wait_timeout instruction */
+ }
+ }
+
+ ASSERT(msgp == PEEK_MESSAGE(c_p));
+ ASSERT(msgp && ERTS_SIG_IS_MSG(msgp));
+
+ if (ERTS_UNLIKELY(ERTS_SIG_IS_EXTERNAL_MSG(msgp))) {
+ FCALLS -= 10; /* FIXME: bump appropriate amount... */
+ SWAPOUT; /* erts_decode_dist_message() may write to heap... */
+ if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) {
+ /*
+ * A corrupt distribution message that we weren't able to decode;
+ * remove it...
+ */
+ /* No swapin should be needed */
+ ASSERT(HTOP == c_p->htop && E == c_p->stop);
+ /* TODO: Add DTrace probe for this bad message situation? */
+ UNLINK_MESSAGE(c_p, msgp);
+ msgp->next = NULL;
+ erts_cleanup_messages(msgp);
+ goto loop_rec__;
+ }
+ SWAPIN;
+ }
+
+ ASSERT(msgp == PEEK_MESSAGE(c_p));
+ ASSERT(ERTS_SIG_IS_INTERNAL_MSG(msgp));
+
+ r(0) = ERL_MESSAGE_TERM(msgp);
+}
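
The outcome handling for erts_proc_sig_receive_helper() above can be summarized as follows (a restatement for clarity, not the emulator's code): a non-NULL msgp continues the match loop, and otherwise get_out distinguishes yielding, exiting, and an empty queue:

    enum recv_outcome {
        RECV_GOT_MSG,  /* msgp != NULL: continue the match loop      */
        RECV_YIELD,    /* out of reductions: re-enter loop_rec later */
        RECV_EXIT,     /* process is exiting: go schedule            */
        RECV_EMPTY     /* queue exhausted: jump to wait/wait_timeout */
    };

    static enum recv_outcome classify(const void *msgp, int get_out)
    {
        if (msgp)        return RECV_GOT_MSG;
        if (get_out < 0) return RECV_YIELD;
        if (get_out > 0) return RECV_EXIT;
        return RECV_EMPTY;
    }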
+
+remove_message() {
+ //| -no_prefetch
+
+ /*
+ * Remove a (matched) message from the message queue.
+ */
+
+ ErtsMessage* msgp;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_receive);
+ }
+ if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
+#ifdef USE_VM_PROBES
+ if (DT_UTAG(c_p) != NIL) {
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
+ SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
+ } else {
+ DT_UTAG(c_p) = NIL;
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+ }
+ } else {
+#endif
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+#ifdef USE_VM_PROBES
+ }
+ DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
+#endif
+ } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
+ Eterm msg;
+ SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
+#ifdef USE_VM_PROBES
+ if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
+ if (DT_UTAG(c_p) == NIL) {
+ DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
+ }
+ DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
+ } else {
+#endif
+ ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
+ ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
+ c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
+ c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ }
+ msg = ERL_MESSAGE_TERM(msgp);
+ seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
+ c_p->common.id, c_p);
+#ifdef USE_VM_PROBES
+ }
+#endif
+ }
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_receive)) {
+ Eterm token2 = NIL;
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Sint len = erts_proc_sig_privqs_len(c_p);
+
+ dtrace_proc_str(c_p, receiver_name);
+ token2 = SEQ_TRACE_TOKEN(c_p);
+ if (have_seqtrace(token2)) {
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(token2);
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
+ }
+ DTRACE6(message_receive,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
+                len, /* This is NOT the message queue length, but it is something... */
+ tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+ UNLINK_MESSAGE(c_p, msgp);
+ JOIN_MESSAGE(c_p);
+ CANCEL_TIMER(c_p);
+
+ erts_save_message_in_proc(c_p, msgp);
+ c_p->flags &= ~F_DELAY_GC;
+
+ if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) {
+ /*
+ * We want to GC soon but we leave a few
+ * reductions giving the message some time
+ * to turn into garbage.
+ */
+ ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS);
+ }
+
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+ ERTS_CHK_MBUF_SZ(c_p);
+
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+}
+
+loop_rec_end(Dest) {
+ //| -no_next
+ /*
+ * Advance the save pointer to the next message (the current
+ * message didn't match), then jump to the loop_rec instruction.
+ */
+
+ ASSERT(c_p->flags & F_DELAY_GC);
+
+ $SET_I_REL($Dest);
+ SAVE_MESSAGE(c_p);
+ FCALLS--;
+ goto loop_rec__;
+}
+
+timeout_locked() {
+ /*
+ * A timeout has occurred. Reset the save pointer so that the next
+ * receive statement will examine the first message first.
+ */
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $timeout();
+}
+
+timeout() {
+ if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
+ trace_receive(c_p, am_clock_service, am_timeout, NULL);
+ }
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_timeout);
+ }
+ c_p->flags &= ~F_TIMO;
+ JOIN_MESSAGE(c_p);
+}
+
+TIMEOUT_VALUE() {
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ //| -no_next
+}
+
+i_wait_error_locked() {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $TIMEOUT_VALUE();
+}
+
+i_wait_error() {
+ $TIMEOUT_VALUE();
+}
+
+wait_timeout_unlocked_int := wait.lock.int.execute;
+wait_timeout_locked_int := wait.int.execute;
+
+wait_timeout_unlocked := wait.lock.src.execute;
+wait_timeout_locked := wait.src.execute;
+
+wait_unlocked := wait.lock.execute;
+wait_locked := wait.unlocked.execute;
+
+wait.lock() {
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+}
+
+wait.unlocked() {
+}
+
+wait.int(Int) {
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+ BeamInstr** pi = (BeamInstr **) c_p->def_arg_reg;
+ *pi = $NEXT_INSTRUCTION;
+ erts_set_proc_timer_uword(c_p, $Int);
+ }
+}
+
+wait.src(Src) {
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+ Eterm timeout_value = $Src;
+ if (timeout_value == make_small(0)) {
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ $NEXT0();
+ } else if (timeout_value == am_infinity) {
+ c_p->flags |= F_TIMO;
+ } else {
+ int tres = erts_set_proc_timer_term(c_p, timeout_value);
+ if (tres == 0) {
+ /*
+                 * The timer routine will set c_p->i to the value in
+ * c_p->def_arg_reg[0]. Note that it is safe to use this
+ * location because there are no living x registers in
+ * a receive statement.
+ */
+ BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
+ *pi = $NEXT_INSTRUCTION;
+            } else { /* Invalid timeout value */
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ }
+ }
+ }
+}
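
The timeout-value cases handled above, summarized as a sketch (hypothetical plain-integer encoding; the real code works on tagged Eterm values via erts_set_proc_timer_term(), and the 32-bit bound is the one noted for wait_timeout in ops.tab):

    enum timeout_kind { TIMO_ZERO, TIMO_INFINITY, TIMO_TIMER, TIMO_INVALID };

    static enum timeout_kind classify_timeout(long long value, int is_infinity)
    {
        if (is_infinity)  return TIMO_INFINITY; /* set F_TIMO, wait forever */
        if (value == 0)   return TIMO_ZERO;     /* continue at once         */
        if (value > 0 && value < (1LL << 32))
            return TIMO_TIMER;                  /* arm the process timer    */
        return TIMO_INVALID;                    /* EXC_TIMEOUT_VALUE        */
    }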
+
+//
+// Prepare to wait indefinitely for a new message to arrive
+// (or until the timeout set above expires, if we fell through
+// from the code above). When a new message arrives, control will
+// be transferred to the loop_rec instruction (at label L1). In
+// case of a timeout, control will be transferred to the timeout
+// instruction following the wait_timeout instruction.
+//
+
+wait.execute(JumpTarget) {
+ $SET_REL_I(c_p->i, $JumpTarget); /* L1 */
+ SWAPOUT;
+ c_p->arity = 0;
+
+ if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) {
+ erts_atomic32_read_band_relb(&c_p->state,
+ ~ERTS_PSFLG_ACTIVE);
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ c_p->current = NULL;
+ goto do_schedule;
+ //| -no_next
+}
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 879daaca0a..e76d896ffc 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -59,6 +59,7 @@ put_tuple u==0 d => too_old_compiler
# All the other instructions.
#
+%cold
label L
i_func_info I a a I
int_code_end
@@ -68,6 +69,8 @@ i_debug_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
+trace_jump W
+%hot
return
@@ -96,24 +99,21 @@ line Loc | func_info M F A => func_info M F A | line Loc
line I
+allocate t t?
+allocate_heap t I t?
-%macro: allocate Allocate -pack
-%macro: allocate_zero AllocateZero -pack
-%macro: allocate_heap AllocateHeap -pack
-%macro: allocate_heap_zero AllocateHeapZero -pack
-%macro: test_heap TestHeap -pack
+%cold
+deallocate Q
+%hot
-allocate t t
-allocate_heap t I t
-deallocate I
init y
-allocate_zero t t
-allocate_heap_zero t I t
+allocate_zero t t?
+allocate_heap_zero t I t?
trim N Remaining => i_trim N
-i_trim I
+i_trim t
-test_heap I t
+test_heap I t?
allocate_heap S u==0 R => allocate S R
allocate_heap_zero S u==0 R => allocate_zero S R
@@ -122,8 +122,6 @@ init2 y y
init3 y y y
init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3
init Y1 | init Y2 => init2 Y1 Y2
-%macro: init2 Init2 -pack
-%macro: init3 Init3 -pack
# Selecting values
@@ -160,37 +158,21 @@ is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \
select_tuple_arity S=d Fail=f Size=u Rest=* => \
gen_select_tuple_arity(S, Fail, Size, Rest)
-i_select_val_bins x f I
-i_select_val_bins y f I
+i_select_val_bins xy f? I
-i_select_val_lins x f I
-i_select_val_lins y f I
+i_select_val_lins xy f? I
-i_select_val2 x f c c f f
-i_select_val2 y f c c f f
+i_select_val2 xy f? c c
-i_select_tuple_arity x f I
-i_select_tuple_arity y f I
+i_select_tuple_arity xy f? I
-i_select_tuple_arity2 x f A A f f
-i_select_tuple_arity2 y f A A f f
+i_select_tuple_arity2 xy f? A A
-i_jump_on_val_zero x f I
-i_jump_on_val_zero y f I
+i_jump_on_val_zero xy f? I
-i_jump_on_val x f I I
-i_jump_on_val y f I I
+i_jump_on_val xy f? I W
-%macro: get_list GetList -pack
-get_list x x x
-get_list x x y
-get_list x y x
-get_list x y y
-
-get_list y x x
-get_list y x y
-get_list y y x
-get_list y y y
+get_list xy xy xy
# The following get_list instructions using x(0) are frequently used.
get_list r x x
@@ -200,52 +182,55 @@ get_list r x y
get_list r y r
get_list r x r
+get_hd xy xy
+get_tl xy xy
+
# Old-style catch.
catch y f
catch_end y
# Try/catch.
try Y F => catch Y F
-try_case Y => try_end Y
+
+try_case y
try_end y
+%cold
try_case_end s
+%hot
# Destructive set tuple element
-set_tuple_element s d P
+set_tuple_element s S P
# Get tuple element
-%macro: i_get_tuple_element GetTupleElement -pack
-i_get_tuple_element x P x
-i_get_tuple_element y P x
+i_get_tuple_element xy P x
%cold
-i_get_tuple_element x P y
-i_get_tuple_element y P y
+i_get_tuple_element xy P y
%hot
-%macro: i_get_tuple_element2 GetTupleElement2 -pack
i_get_tuple_element2 x P x
-
-%macro: i_get_tuple_element2y GetTupleElement2Y -pack
i_get_tuple_element2y x P y y
-%macro: i_get_tuple_element3 GetTupleElement3 -pack
i_get_tuple_element3 x P x
-%macro: is_number IsNumber -fail_action
%cold
-is_number f x
-is_number f y
+is_number f? xy
%hot
+
is_number Fail=f i =>
is_number Fail=f na => jump Fail
is_number Fail Literal=q => move Literal x | is_number Fail x
jump f
+#
+# Exception-raising instructions. Infrequently executed.
+#
+
+%cold
case_end NotInX=cy => move NotInX x | case_end x
badmatch NotInX=cy => move NotInX x | badmatch x
@@ -259,7 +244,7 @@ if_end
# Optimize for that case.
raise x==2 x==1 => i_raise
raise Trace=y Value=y => move Trace x=2 | move Value x=1 | i_raise
-raise Trace Value => move Trace x=3 | move Value x=1 | move x=3 x=2 | i_raise
+raise Trace Value => move Trace x | move Value x=1 | move x x=2 | i_raise
i_raise
@@ -267,14 +252,15 @@ i_raise
badarg j
system_limit j
-move C=cxy x==0 | jump Lbl => move_jump Lbl C
+%hot
+
+#
+# Move instructions.
+#
-%macro: move_jump MoveJump -nonext
-move_jump f n
-move_jump f c
-move_jump f x
-move_jump f y
+move C=cxy x==0 | jump Lbl => move_jump Lbl C
+move_jump f ncxy
# Movement to and from the stack is common
# Try to pack as much as we can into one instruction
@@ -297,10 +283,6 @@ move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \
move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1
move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1
-%macro: move_window3 MoveWindow3 -pack
-%macro: move_window4 MoveWindow4 -pack
-%macro: move_window5 MoveWindow5 -pack
-
move_window3 x x x y
move_window4 x x x x y
move_window5 x x x x x y
@@ -325,13 +307,9 @@ swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \
swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \
is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D
-%macro: swap_temp SwapTemp -pack
-swap_temp x x x
-swap_temp x y x
+swap_temp x xy x
-%macro: swap Swap -pack
-swap x x
-swap x y
+swap x xy
move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2
move Src=x SD=x | move SD=x D=x => move_dup Src SD D
@@ -374,19 +352,12 @@ move C=aiq X=x==2 => move_x2 C
move_x1 c
move_x2 c
-%macro: move_shift MoveShift -pack
move_shift x x x
move_shift y x x
move_shift x y x
move_shift x x y
-%macro: move_dup MoveDup -pack
-move_dup x x x
-move_dup x x y
-move_dup y x x
-move_dup y x y
-
-%macro: move2_par Move2Par -pack
+move_dup xy x xy
move2_par x y x y
move2_par y x y x
@@ -399,7 +370,6 @@ move2_par y x x y
move2_par x x y x
move2_par y x x x
-%macro: move3 Move3 -pack
move3 x y x y x y
move3 y x y x y x
move3 x x x x x x
@@ -409,7 +379,6 @@ move3 x x x x x x
move S=n D=y => init D
move S=c D=y => move S x | move x D
-%macro:move Move -pack -gen_dest
move x x
move x y
move y x
@@ -429,13 +398,15 @@ move r y
loop_rec Fail x==0 | smp_mark_target_label(Fail) => i_loop_rec Fail
-label L | wait_timeout Fail Src | smp_already_locked(L) => label L | i_wait_timeout_locked Fail Src
-wait_timeout Fail Src => i_wait_timeout Fail Src
-i_wait_timeout Fail Src=aiq => gen_literal_timeout(Fail, Src)
-i_wait_timeout_locked Fail Src=aiq => gen_literal_timeout_locked(Fail, Src)
+label L | wait_timeout Fail Src | smp_already_locked(L) => \
+ label L | wait_timeout_locked Src Fail
+wait_timeout Fail Src => wait_timeout_unlocked Src Fail
+
+wait_timeout_unlocked Src=aiq Fail => gen_literal_timeout(Fail, Src)
+wait_timeout_locked Src=aiq Fail => gen_literal_timeout_locked(Fail, Src)
label L | wait Fail | smp_already_locked(L) => label L | wait_locked Fail
-wait Fail | smp() => wait_unlocked Fail
+wait Fail => wait_unlocked Fail
label L | timeout | smp_already_locked(L) => label L | timeout_locked
@@ -444,15 +415,19 @@ timeout
timeout_locked
i_loop_rec f
loop_rec_end f
-wait f
wait_locked f
wait_unlocked f
-i_wait_timeout f I
-i_wait_timeout f s
-i_wait_timeout_locked f I
-i_wait_timeout_locked f s
+
+# Note that a timeout value must fit in 32 bits.
+wait_timeout_unlocked_int I f
+wait_timeout_unlocked s f
+wait_timeout_locked_int I f
+wait_timeout_locked s f
+
+%cold
i_wait_error
i_wait_error_locked
+%hot
send
@@ -460,57 +435,52 @@ send
# Optimized comparisons with one immediate/literal operand.
#
-is_eq_exact Lbl R=xy C=ian => i_is_eq_exact_immed Lbl R C
+is_eq_exact Lbl S S =>
+is_eq_exact Lbl C1=c C2=c => move C1 x | is_eq_exact Lbl x C2
+is_eq_exact Lbl C=c R=xy => is_eq_exact Lbl R C
+
+is_eq_exact Lbl R=xy n => is_nil Lbl R
+is_eq_exact Lbl R=xy C=ia => i_is_eq_exact_immed Lbl R C
is_eq_exact Lbl R=xy C=q => i_is_eq_exact_literal Lbl R C
+is_ne_exact Lbl S S => jump Lbl
+is_ne_exact Lbl C1=c C2=c => move C1 x | is_ne_exact Lbl x C2
+is_ne_exact Lbl C=c R=xy => is_ne_exact Lbl R C
+
is_ne_exact Lbl R=xy C=ian => i_is_ne_exact_immed Lbl R C
is_ne_exact Lbl R=xy C=q => i_is_ne_exact_literal Lbl R C
-%macro: i_is_eq_exact_immed EqualImmed -fail_action
-i_is_eq_exact_immed f r c
-i_is_eq_exact_immed f x c
-i_is_eq_exact_immed f y c
+i_is_eq_exact_immed f? rxy c
-i_is_eq_exact_literal f x c
-i_is_eq_exact_literal f y c
+i_is_eq_exact_literal f? xy c
-%macro: i_is_ne_exact_immed NotEqualImmed -fail_action
-i_is_ne_exact_immed f x c
-i_is_ne_exact_immed f y c
+i_is_ne_exact_immed f? xy c
-i_is_ne_exact_literal f x c
-i_is_ne_exact_literal f y c
+i_is_ne_exact_literal f? xy c
is_eq_exact Lbl Y=y X=x => is_eq_exact Lbl X Y
-%macro: is_eq_exact EqualExact -fail_action -pack
-is_eq_exact f x x
-is_eq_exact f x y
-is_eq_exact f s s
-
-%macro: is_lt IsLessThan -fail_action
-is_lt f x x
-is_lt f x c
-is_lt f c x
+is_eq_exact f? x xy
+is_eq_exact f? y y
+
+is_ne_exact f? S S
+
+is_lt f? x x
+is_lt f? x c
+is_lt f? c x
%cold
-is_lt f s s
+is_lt f? s s
%hot
-%macro: is_ge IsGreaterEqual -fail_action
-is_ge f x x
-is_ge f x c
-is_ge f c x
+is_ge f? x x
+is_ge f? x c
+is_ge f? c x
%cold
-is_ge f s s
+is_ge f? s s
%hot
-%macro: is_ne_exact NotEqualExact -fail_action
-is_ne_exact f s s
-
-%macro: is_eq Equal -fail_action
-is_eq f s s
+is_eq f? s s
-%macro: is_ne NotEqual -fail_action
-is_ne f s s
+is_ne f? s s
#
# Putting things.
@@ -527,9 +497,7 @@ i_put_tuple Dst Arity Puts=* | put S => \
i_put_tuple/2
-%macro:i_put_tuple PutTuple -pack -goto:do_put_tuple
-i_put_tuple x I
-i_put_tuple y I
+i_put_tuple xy I
#
# The instruction "put_list Const [] Dst" was generated in rare
@@ -538,7 +506,9 @@ i_put_tuple y I
#
put_list Const=c n Dst => move Const x | put_list x n Dst
-%macro:put_list PutList -pack -gen_dest
+put_list Src Dst=x Dst => update_list Src Dst
+
+update_list xyc x
put_list x n x
put_list y n x
@@ -548,8 +518,6 @@ put_list y x x
put_list y y x
put_list x y x
-put_list y x x
-
# put_list SrcReg Constant Dst
put_list x c x
@@ -564,8 +532,6 @@ put_list c y x
# The following put_list instructions using x(0) are frequently used.
-put_list y r r
-put_list x r r
put_list r n r
put_list r n x
put_list r x x
@@ -576,10 +542,12 @@ put_list x x r
put_list s s d
%hot
+
#
# Some more only used by the emulator
#
+%cold
normal_exit
continue_exit
apply_bif
@@ -587,6 +555,7 @@ call_nif
call_error_handler
error_action_code
return_trace
+%hot
#
# Instruction transformations & folded instructions.
@@ -597,52 +566,44 @@ return_trace
move S x==0 | return => move_return S
-%macro: move_return MoveReturn -nonext
-move_return x
-move_return c
-move_return n
+move_return xcn
move S x==0 | deallocate D | return => move_deallocate_return S D
-%macro: move_deallocate_return MoveDeallocateReturn -pack -nonext
-move_deallocate_return x Q
-move_deallocate_return y Q
-move_deallocate_return c Q
-move_deallocate_return n Q
+move_deallocate_return xycn Q
deallocate D | return => deallocate_return D
-%macro: deallocate_return DeallocateReturn -nonext
deallocate_return Q
test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y
-%macro: test_heap_1_put_list TestHeapPutList -pack
test_heap_1_put_list I y
+#
+# is_tagged_tuple Fail=f Src=rxy Arity Atom=a
+#
+
+is_tagged_tuple Fail Literal=q Arity Atom => \
+ move Literal x | is_tagged_tuple Fail x Arity Atom
+is_tagged_tuple Fail=f c Arity Atom => jump Fail
+
+is_tagged_tuple f? rxy A a
+
# Test tuple & arity (head)
is_tuple Fail Literal=q => move Literal x | is_tuple Fail x
is_tuple Fail=f c => jump Fail
is_tuple Fail=f S=xy | test_arity Fail=f S=xy Arity => is_tuple_of_arity Fail S Arity
-%macro:is_tuple_of_arity IsTupleOfArity -fail_action
-
-is_tuple_of_arity f r A
-is_tuple_of_arity f x A
-is_tuple_of_arity f y A
+is_tuple_of_arity f? rxy A
-%macro: is_tuple IsTuple -fail_action
-is_tuple f r
-is_tuple f x
-is_tuple f y
+is_tuple f? rxy
test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity
test_arity Fail=f c Arity => jump Fail
-%macro: test_arity IsArity -fail_action
-test_arity f x A
-test_arity f y A
+test_arity f? xy A
get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \
get_tuple_element Reg=x P3 D3=x | \
@@ -663,56 +624,40 @@ is_integer Fail Literal=q => move Literal x | is_integer Fail x
is_integer Fail=f S=x | allocate Need Regs => is_integer_allocate Fail S Need Regs
-%macro: is_integer_allocate IsIntegerAllocate -fail_action
-is_integer_allocate f x I I
+is_integer_allocate f? x t t
-%macro: is_integer IsInteger -fail_action
-is_integer f x
-is_integer f y
+is_integer f? xy
is_list Fail=f n =>
is_list Fail Literal=q => move Literal x | is_list Fail x
is_list Fail=f c => jump Fail
-%macro: is_list IsList -fail_action
-is_list f x
+is_list f? x
%cold
-is_list f y
+is_list f? y
%hot
is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
-%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack
-is_nonempty_list_allocate f r I t
-is_nonempty_list_allocate f x I t
-
-is_nonempty_list F=f x==0 | test_heap I1 I2 => is_non_empty_list_test_heap F I1 I2
-
-%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action -pack
-is_non_empty_list_test_heap f I t
+is_nonempty_list F=f x==0 | test_heap I1 I2 => is_nonempty_list_test_heap F I1 I2
is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \
is_nonempty_list_get_list Fail S D1 D2
-%macro: is_nonempty_list_get_list IsNonemptyListGetList -fail_action -pack
-is_nonempty_list_get_list f r x x
-is_nonempty_list_get_list f x x x
-
-%macro: is_nonempty_list IsNonemptyList -fail_action
-is_nonempty_list f x
-is_nonempty_list f y
+is_nonempty_list_allocate f? rx t t
+is_nonempty_list_test_heap f? I t
+is_nonempty_list_get_list f? rx x x
+is_nonempty_list f? xy
-%macro: is_atom IsAtom -fail_action
-is_atom f x
+is_atom f? x
%cold
-is_atom f y
+is_atom f? y
%hot
is_atom Fail=f a =>
is_atom Fail=f niq => jump Fail
-%macro: is_float IsFloat -fail_action
-is_float f x
+is_float f? x
%cold
-is_float f y
+is_float f? y
%hot
is_float Fail=f nai => jump Fail
is_float Fail Literal=q => move Literal x | is_float Fail x
@@ -720,16 +665,13 @@ is_float Fail Literal=q => move Literal x | is_float Fail x
is_nil Fail=f n =>
is_nil Fail=f qia => jump Fail
-%macro: is_nil IsNil -fail_action
-is_nil f x
-is_nil f y
+is_nil f? xy
is_binary Fail Literal=q => move Literal x | is_binary Fail x
is_binary Fail=f c => jump Fail
-%macro: is_binary IsBinary -fail_action
-is_binary f x
+is_binary f? x
%cold
-is_binary f y
+is_binary f? y
%hot
# XXX Deprecated.
@@ -737,31 +679,27 @@ is_bitstr Fail Term => is_bitstring Fail Term
is_bitstring Fail Literal=q => move Literal x | is_bitstring Fail x
is_bitstring Fail=f c => jump Fail
-%macro: is_bitstring IsBitstring -fail_action
-is_bitstring f x
+is_bitstring f? x
%cold
-is_bitstring f y
+is_bitstring f? y
%hot
is_reference Fail=f cq => jump Fail
-%macro: is_reference IsRef -fail_action
-is_reference f x
+is_reference f? x
%cold
-is_reference f y
+is_reference f? y
%hot
is_pid Fail=f cq => jump Fail
-%macro: is_pid IsPid -fail_action
-is_pid f x
+is_pid f? x
%cold
-is_pid f y
+is_pid f? y
%hot
is_port Fail=f cq => jump Fail
-%macro: is_port IsPort -fail_action
-is_port f x
+is_port f? x
%cold
-is_port f y
+is_port f? y
%hot
is_boolean Fail=f a==am_true =>
@@ -769,47 +707,43 @@ is_boolean Fail=f a==am_false =>
is_boolean Fail=f ac => jump Fail
%cold
-%macro: is_boolean IsBoolean -fail_action
-is_boolean f x
-is_boolean f y
+is_boolean f? xy
%hot
-is_function2 Fail=f acq Arity => jump Fail
+is_function2 Fail=f Literal=q Arity | literal_is_export(Literal) =>
+is_function2 Fail=f c Arity => jump Fail
is_function2 Fail=f Fun a => jump Fail
-is_function2 f s s
-%macro: is_function2 IsFunction2 -fail_action
+is_function2 f? S s
# Allocating & initializing.
allocate Need Regs | init Y => allocate_init Need Regs Y
init Y1 | init Y2 => init2 Y1 Y2
-%macro: allocate_init AllocateInit -pack
-allocate_init t I y
+allocate_init t t? y
#################################################################
# External function and bif calls.
#################################################################
#
-# The BIFs erts_internal:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/1 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0 must be called like a function,
+# The BIF erts_internal:garbage_collect/1 must be called like a function,
# to allow it to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
-
-call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
-call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
-call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
+call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
@@ -1033,16 +967,18 @@ call_ext_last Ar Func D => i_call_ext_last Func D
call_ext_only Ar Func => i_call_ext_only Func
i_apply
-i_apply_last P
+i_apply_last Q
i_apply_only
i_apply_fun
-i_apply_fun_last P
+i_apply_fun_last Q
i_apply_fun_only
+%cold
i_hibernate
i_perf_counter
+%hot
call_bif e
@@ -1065,77 +1001,57 @@ bif2 Fail Bif S1 S2 Dst => i_bif2 Fail Bif S1 S2 Dst
i_get_hash c I d
i_get s d
-%macro: self Self
-self x
-self y
+self xy
-%macro: node Node
node x
%cold
node y
%hot
-i_fast_element j x I d
-i_fast_element j y I d
+# Note: 'I' is sufficient because this instruction will only be used
+# if the arity fits in 24 bits.
+i_fast_element xy j? I d
-i_element j x s d
-i_element j y s d
+i_element xy j? s d
-bif1 f b s d
+bif1 f? b s d
bif1_body b s d
-i_bif2 f b s s d
+i_bif2 f? b s s d
i_bif2_body b s s d
#
# Internal calls.
#
-move S=c x==0 | call Ar P=f => i_move_call S P
-move S=s x==0 | call Ar P=f => move_call S P
-
-i_move_call c f
+move S=cxy x==0 | call Ar P=f => move_call S P
-%macro:move_call MoveCall -arg_f -size -nonext
move_call/2
+move_call cxy f
-move_call x f
-move_call y f
-
-move S=c x==0 | call_last Ar P=f D => i_move_call_last P D S
move S x==0 | call_last Ar P=f D => move_call_last S P D
-i_move_call_last f P c
-
-%macro:move_call_last MoveCallLast -arg_f -nonext -pack
-
move_call_last/3
-move_call_last x f Q
-move_call_last y f Q
+move_call_last cxy f Q
-move S=c x==0 | call_only Ar P=f => i_move_call_only P S
-move S=x x==0 | call_only Ar P=f => move_call_only S P
+move S=cx x==0 | call_only Ar P=f => move_call_only S P
-i_move_call_only f c
-
-%macro:move_call_only MoveCallOnly -arg_f -nonext
move_call_only/2
-
-move_call_only x f
+move_call_only cx f
call Ar Func => i_call Func
call_last Ar Func D => i_call_last Func D
call_only Ar Func => i_call_only Func
i_call f
-i_call_last f P
+i_call_last f Q
i_call_only f
i_call_ext e
-i_call_ext_last e P
+i_call_ext_last e Q
i_call_ext_only e
i_move_call_ext c e
-i_move_call_ext_last e P c
+i_move_call_ext_last e Q c
i_move_call_ext_only e c
# Fun calls.
@@ -1143,19 +1059,16 @@ i_move_call_ext_only e c
call_fun Arity | deallocate D | return => i_call_fun_last Arity D
call_fun Arity => i_call_fun Arity
-i_call_fun I
-i_call_fun_last I P
+i_call_fun t
+i_call_fun_last t Q
make_fun2 OldIndex=u => gen_make_fun2(OldIndex)
-%macro: i_make_fun MakeFun -pack
%cold
-i_make_fun I t
+i_make_fun W t
%hot
-%macro: is_function IsFunction -fail_action
-is_function f x
-is_function f y
+is_function f? xy
is_function Fail=f c => jump Fail
func_info M F A => i_func_info u M F A
@@ -1164,46 +1077,44 @@ func_info M F A => i_func_info u M F A
# New bit syntax matching (R11B).
# ================================================================
-%cold
+%warm
bs_start_match2 Fail=f ica X Y D => jump Fail
bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
-i_bs_start_match2 x f I I d
-i_bs_start_match2 y f I I d
+i_bs_start_match2 xy f t t x
bs_save2 Reg Index => gen_bs_save(Reg, Index)
-i_bs_save2 x I
+i_bs_save2 x t
bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
-i_bs_restore2 x I
+i_bs_restore2 x t
# Matching integers
bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
-i_bs_match_string x f I I
+i_bs_match_string x f W W
# Fetching integers from binaries.
bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-i_bs_get_integer_small_imm x I f I d
-i_bs_get_integer_imm x I I f I d
-i_bs_get_integer f I I s s d
-i_bs_get_integer_8 x f d
-i_bs_get_integer_16 x f d
-i_bs_get_integer_32 x f I d
+i_bs_get_integer_small_imm x W f? t x
+i_bs_get_integer_imm x W t f? t x
+i_bs_get_integer f? t t x s x
+i_bs_get_integer_8 x f? x
+i_bs_get_integer_16 x f? x
+
+%if ARCH_64
+i_bs_get_integer_32 x f? x
+%endif
# Fetching binaries from binaries.
bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
-%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action -gen_dest
-%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action -gen_dest
-%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action -gen_dest
-
-i_bs_get_binary_imm2 f x I I I d
-i_bs_get_binary2 f x I s I d
-i_bs_get_binary_all2 f x I I d
-i_bs_get_binary_all_reuse x f I
+i_bs_get_binary_imm2 f? x t W t x
+i_bs_get_binary2 f x t? s t x
+i_bs_get_binary_all2 f? x t t x
+i_bs_get_binary_all_reuse x f? t
# Fetching float from binaries.
bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
@@ -1211,36 +1122,32 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
-%macro: i_bs_get_float2 BsGetFloat2 -fail_action -gen_dest
-i_bs_get_float2 f x I s I d
+i_bs_get_float2 f? x t s t x
# Miscellaneous
bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \
gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
-%macro: i_bs_skip_bits_imm2 BsSkipBitsImm2 -fail_action
-i_bs_skip_bits_imm2 f x I
-
-%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action
-i_bs_skip_bits2 f x x I
-i_bs_skip_bits2 f x y I
-
-%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action
-i_bs_skip_bits_all2 f x I
+i_bs_skip_bits_imm2 f? x W
+i_bs_skip_bits2 f? x xy t
+i_bs_skip_bits_all2 f? x t
bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms
bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits
-bs_test_zero_tail2 f x
-bs_test_tail_imm2 f x I
+bs_test_zero_tail2 f? x
+bs_test_tail_imm2 f? x W
bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
-bs_test_unit f x I
-bs_test_unit8 f x
+bs_test_unit f? x t
+bs_test_unit8 f? x
# A y register operand for bs_context_to_binary is rare,
# but can happen because of inlining.
+bs_context_to_binary Y=y | line L | badmatch Y => \
+ move Y x | bs_context_to_binary x | line L | badmatch x
+
bs_context_to_binary Y=y => move Y x | bs_context_to_binary x
bs_context_to_binary x
@@ -1249,14 +1156,14 @@ bs_context_to_binary x
# Utf8/utf16/utf32 support. (R12B-5)
#
bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst
-i_bs_get_utf8 x f d
+i_bs_get_utf8 x f? x
bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x
bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x
-i_bs_get_utf16 x f I d
+i_bs_get_utf16 x f? t x
bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \
@@ -1265,22 +1172,18 @@ bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \
bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \
i_bs_validate_unicode_retract Fail x Ms
-i_bs_validate_unicode_retract j s s
+i_bs_validate_unicode_retract j s S
%hot
#
# Constructing binaries
#
-%cold
+%warm
bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail
-bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst | should_gen_heap_bin(Sz) => \
- i_bs_init_heap_bin Sz Regs Dst
bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst
-bs_init2 Fail Sz=u Words Regs Flags Dst | should_gen_heap_bin(Sz) => \
- i_bs_init_heap_bin_heap Sz Words Regs Dst
bs_init2 Fail Sz=u Words Regs Flags Dst => \
i_bs_init_heap Sz Words Regs Dst
@@ -1289,16 +1192,13 @@ bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \
bs_init2 Fail Sz Words Regs Flags Dst => \
i_bs_init_fail_heap Sz Words Fail Regs Dst
-i_bs_init_fail x j I d
-i_bs_init_fail y j I d
+i_bs_init_fail xy j? t? x
-i_bs_init_fail_heap s I j I d
+i_bs_init_fail_heap s I j? t? x
-i_bs_init I I d
-i_bs_init_heap_bin I I d
+i_bs_init W t? x
-i_bs_init_heap I I I d
-i_bs_init_heap_bin_heap I I I d
+i_bs_init_heap W I t? x
bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail
@@ -1311,17 +1211,16 @@ bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \
bs_init_bits Fail Sz Words Regs Flags Dst => \
i_bs_init_bits_fail_heap Sz Words Fail Regs Dst
-i_bs_init_bits_fail x j I d
-i_bs_init_bits_fail y j I d
+i_bs_init_bits_fail xy j? t? x
-i_bs_init_bits_fail_heap s I j I d
+i_bs_init_bits_fail_heap s I j? t? x
-i_bs_init_bits I I d
-i_bs_init_bits_heap I I I d
+i_bs_init_bits W t? x
+i_bs_init_bits_heap W I t? x
bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D
-bs_add j s s I d
+bs_add j? s s t? x
bs_append Fail Size Extra Live Unit Bin Flags Dst => \
move Bin x | i_bs_append Fail Extra Live Unit Size Dst
@@ -1331,8 +1230,8 @@ bs_private_append Fail Size Unit Bin Flags Dst => \
bs_init_writable
-i_bs_append j I I I s d
-i_bs_private_append j I s s d
+i_bs_append j? I t? t s x
+i_bs_private_append j? t s S x
#
# Storing integers into binaries.
@@ -1341,11 +1240,8 @@ i_bs_private_append j I s s d
bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \
gen_put_integer(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_integer NewBsPutInteger
-%macro: i_new_bs_put_integer_imm NewBsPutIntegerImm
-
-i_new_bs_put_integer j s I s
-i_new_bs_put_integer_imm j I I s
+i_new_bs_put_integer j? s t s
+i_new_bs_put_integer_imm j? W t s
#
# Utf8/utf16/utf32 support. (R12B-5)
@@ -1353,22 +1249,22 @@ i_new_bs_put_integer_imm j I I s
bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst
-i_bs_utf8_size s d
+i_bs_utf8_size s x
bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst
-i_bs_utf16_size s d
+i_bs_utf16_size s x
bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
-i_bs_put_utf8 j s
+i_bs_put_utf8 j? s
-bs_put_utf16 j I s
+bs_put_utf16 j? t s
bs_put_utf32 Fail=j Flags=u Src=s => \
i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
-i_bs_validate_unicode j s
+i_bs_validate_unicode j? s
#
# Storing floats into binaries.
@@ -1378,11 +1274,8 @@ bs_put_float Fail Sz=q Unit Flags Val => badarg Fail
bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_float(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_float NewBsPutFloat
-%macro: i_new_bs_put_float_imm NewBsPutFloatImm
-
-i_new_bs_put_float j s I s
-i_new_bs_put_float_imm j I I s
+i_new_bs_put_float j? s t s
+i_new_bs_put_float_imm j? W t s
#
# Storing binaries into binaries.
@@ -1391,14 +1284,9 @@ i_new_bs_put_float_imm j I I s
bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \
gen_put_binary(Fail, Sz, Unit, Flags, Src)
-%macro: i_new_bs_put_binary NewBsPutBinary
-i_new_bs_put_binary j s I s
-
-%macro: i_new_bs_put_binary_imm NewBsPutBinaryImm
-i_new_bs_put_binary_imm j I s
-
-%macro: i_new_bs_put_binary_all NewBsPutBinaryAll
-i_new_bs_put_binary_all j s I
+i_new_bs_put_binary j? s t s
+i_new_bs_put_binary_imm j? W s
+i_new_bs_put_binary_all j? s t
#
# Warning: The i_bs_put_string and i_new_bs_put_string instructions
@@ -1406,9 +1294,7 @@ i_new_bs_put_binary_all j s I
# Don't change the instruction format unless you change the loader too.
#
-bs_put_string I I
-
-%hot
+bs_put_string W W
#
# New floating point instructions (R8).
@@ -1422,11 +1308,13 @@ fnegate p FR1 FR2 => i_fnegate FR1 FR2
fconv Arg=iqan Dst=l => move Arg x | fconv x Dst
-fmove q l
-fmove d l
-fmove l d
+fmove Arg=l Dst=d => fstore Arg Dst
+fmove Arg=dq Dst=l => fload Arg Dst
+
+fstore l d
+fload Sq l
-fconv d l
+fconv S l
i_fadd l l l
i_fsub l l l
@@ -1436,52 +1324,88 @@ i_fnegate l l
fclearerror | no_fpe_signals() =>
fcheckerror p | no_fpe_signals() =>
+
+%unless NO_FPE_SIGNALS
fcheckerror p => i_fcheckerror
i_fcheckerror
fclearerror
+%endif
+
+%hot
#
# New apply instructions in R10B.
#
-apply I
-apply_last I P
+apply t
+apply_last t Q
+
+#
+# Handle compatibility with OTP 17 here.
+#
+
+i_put_map_assoc/4
+
+# We KNOW that in OTP 20 (actually OTP 18 and higher), a put_map_assoc instruction
+# is always preceded by an is_map test. That means that put_map_assoc can never
+# fail and does not need any failure label.
+
+put_map_assoc Fail Map Dst Live Size Rest=* | compiled_with_otp_20_or_higher() => \
+ i_put_map_assoc Map Dst Live Size Rest
+
+# Translate the put_map_assoc instruction if the module was compiled by a
+# compiler older than OTP 20. This is only necessary if the OTP 17 compiler
+# was used, but we have no safe and reasonably easy way to know whether
+# OTP 18 or 19 was used.
+
+put_map_assoc Fail=p Map Dst Live Size Rest=* => \
+ ensure_map Map | i_put_map_assoc Map Dst Live Size Rest
+put_map_assoc Fail=f Map Dst Live Size Rest=* => \
+ is_map Fail Map | i_put_map_assoc Map Dst Live Size Rest
+
+ensure_map Lit=q | literal_is_map(Lit) =>
+ensure_map Src=cqy => move Src x | ensure_map x
+
+%cold
+ensure_map x
+%hot
#
-# Map instructions in R17.
+# Map instructions. First introduced in R17.
#
-sorted_put_map_assoc/5
-put_map_assoc F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
- sorted_put_map_assoc F Map Dst Live Size Rest
+sorted_put_map_assoc/4
+i_put_map_assoc Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
+ sorted_put_map_assoc Map Dst Live Size Rest
sorted_put_map_exact/5
put_map_exact F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \
sorted_put_map_exact F Map Dst Live Size Rest
-sorted_put_map_assoc j Map Dst Live Size Rest=* | is_empty_map(Map) => \
+sorted_put_map_assoc Map Dst Live Size Rest=* | is_empty_map(Map) => \
new_map Dst Live Size Rest
-sorted_put_map_assoc F Src=s Dst Live Size Rest=* => \
- update_map_assoc F Src Dst Live Size Rest
-sorted_put_map_assoc F Src Dst Live Size Rest=* => \
- move Src x | update_map_assoc F x Dst Live Size Rest
+sorted_put_map_assoc Src=s Dst Live Size Rest=* => \
+ update_map_assoc Src Dst Live Size Rest
+sorted_put_map_assoc Src Dst Live Size Rest=* => \
+ move Src x | update_map_assoc x Dst Live Size Rest
sorted_put_map_exact F Src=s Dst Live Size Rest=* => \
update_map_exact F Src Dst Live Size Rest
sorted_put_map_exact F Src Dst Live Size Rest=* => \
move Src x | update_map_exact F x Dst Live Size Rest
-new_map d I I
-update_map_assoc j s d I I
-update_map_exact j s d I I
+new_map Dst Live Size Rest=* | is_small_map_literal_keys(Size, Rest) => \
+ gen_new_small_map_lit(Dst, Live, Size, Rest)
+
+new_map d t I
+i_new_small_map_lit d t q
+update_map_assoc s d t I
+update_map_exact j? s d t I
is_map Fail Lit=q | literal_is_map(Lit) =>
is_map Fail cq => jump Fail
-%macro: is_map IsMap -fail_action
-is_map f x
-is_map f y
+is_map f? xy
## Transform has_map_fields #{ K1 := _, K2 := _ } to has_map_elements
@@ -1490,27 +1414,20 @@ has_map_fields Fail Src Size Rest=* => \
## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 }
-get_map_elements Fail Src=xy Size=u==2 Rest=* => \
+get_map_elements Fail Src Size=u==2 Rest=* => \
gen_get_map_element(Fail, Src, Size, Rest)
get_map_elements Fail Src Size Rest=* | map_key_sort(Size, Rest) => \
gen_get_map_elements(Fail, Src, Size, Rest)
-i_get_map_elements f s I
-
-i_get_map_element Fail Src=xy Key=y Dst => \
- move Key x | i_get_map_element Fail Src x Dst
+i_get_map_elements f? s I
-%macro: i_get_map_element_hash GetMapElementHash -fail_action
-i_get_map_element_hash f x c I x
-i_get_map_element_hash f y c I x
-i_get_map_element_hash f x c I y
-i_get_map_element_hash f y c I y
+i_get_map_element_hash Fail Src=c Key Hash Dst => \
+ move Src x | i_get_map_element_hash Fail x Key Hash Dst
+i_get_map_element_hash f? xy c I xy
-%macro: i_get_map_element GetMapElement -fail_action
-i_get_map_element f x x x
-i_get_map_element f y x x
-i_get_map_element f x x y
-i_get_map_element f y x y
+i_get_map_element Fail Src=c Key Dst => \
+ move Src x | i_get_map_element Fail x Key Dst
+i_get_map_element f? xy xy xy
#
# Convert the plus operations to a generic plus instruction.
@@ -1545,9 +1462,9 @@ gen_minus p Live Reg=d Int=i Dst | negation_is_small(Int) => \
# GCing arithmetic instructions.
#
-gen_plus Fail Live S1 S2 Dst => i_plus Fail Live S1 S2 Dst
+gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Live Dst
-gen_minus Fail Live S1 S2 Dst => i_minus Fail Live S1 S2 Dst
+gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Live Dst
gc_bif2 Fail Live u$bif:erlang:stimes/2 S1 S2 Dst => \
i_times Fail Live S1 S2 Dst
@@ -1558,15 +1475,15 @@ gc_bif2 Fail Live u$bif:erlang:intdiv/2 S1 S2 Dst => \
i_int_div Fail Live S1 S2 Dst
gc_bif2 Fail Live u$bif:erlang:rem/2 S1 S2 Dst => \
- i_rem Fail Live S1 S2 Dst
+ i_rem S1 S2 Fail Live Dst
gc_bif2 Fail Live u$bif:erlang:bsl/2 S1 S2 Dst => \
- i_bsl Fail Live S1 S2 Dst
+ i_bsl S1 S2 Fail Live Dst
gc_bif2 Fail Live u$bif:erlang:bsr/2 S1 S2 Dst => \
- i_bsr Fail Live S1 S2 Dst
+ i_bsr S1 S2 Fail Live Dst
gc_bif2 Fail Live u$bif:erlang:band/2 S1 S2 Dst => \
- i_band Fail Live S1 S2 Dst
+ i_band S1 S2 Fail Live Dst
gc_bif2 Fail Live u$bif:erlang:bor/2 S1 S2 Dst => \
i_bor Fail Live S1 S2 Dst
@@ -1576,35 +1493,34 @@ gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \
gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
-i_increment r I I d
-i_increment x I I d
-i_increment y I I d
+i_increment rxy W t d
+
+i_plus x xy j? t d
+i_plus s s j? t d
-i_plus j I x x d
-i_plus j I x y d
-i_plus j I s s d
+i_minus x x j? t d
+i_minus s s j? t d
-i_minus j I x x d
-i_minus j I s s d
+i_times j? t s s d
-i_times j I s s d
+i_m_div j? t s s d
+i_int_div j? t s s d
-i_m_div j I s s d
-i_int_div j I s s d
+i_rem x x j? t d
+i_rem s s j? t d
-i_rem j I x x d
-i_rem j I s s d
+i_bsl s s j? t d
+i_bsr s s j? t d
-i_bsl j I s s d
-i_bsr j I s s d
+i_band x c j? t d
+i_band s s j? t d
-i_band j I x c d
-i_band j I s s d
+i_bor j? I s s d
+i_bxor j? I s s d
-i_bor j I s s d
-i_bxor j I s s d
+i_int_bnot Fail Src=c Live Dst => move Src x | i_int_bnot Fail x Live Dst
-i_int_bnot j s I d
+i_int_bnot j? S t d
#
# Old guard BIFs that creates heap fragments are no longer allowed.
@@ -1628,9 +1544,9 @@ gc_bif2 Fail I Bif S1 S2 Dst => \
gc_bif3 Fail I Bif S1 S2 S3 Dst => \
gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
-i_gc_bif1 j I s I d
+i_gc_bif1 j? W s t? d
-i_gc_bif2 j I I s s d
+i_gc_bif2 j? W t? s s d
ii_gc_bif3/7
@@ -1639,7 +1555,7 @@ ii_gc_bif3/7
ii_gc_bif3 Fail Bif Live S1 S2 S3 Dst => \
move S1 x | i_gc_bif3 Fail Bif Live S2 S3 Dst
-i_gc_bif3 j I I s s d
+i_gc_bif3 j? W t? s s d
#
# The following instruction is specially handled in beam_load.c
@@ -1657,8 +1573,20 @@ on_load
#
# R14A.
#
-recv_mark f
+# Modified in OTP 21 because it turns out that we don't need the
+# label after all.
+#
+
+recv_mark f => i_recv_mark
+i_recv_mark
recv_set Fail | label Lbl | loop_rec Lf Reg => \
i_recv_set | label Lbl | loop_rec Lf Reg
i_recv_set
+
+#
+# OTP 21.
+#
+
+build_stacktrace
+raw_raise
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index f14910bc72..4b526887b5 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -200,7 +200,7 @@ static int http_init(void)
for (i = 0; i < HTTP_HDR_HASH_SIZE; i++)
http_hdr_hash[i] = NULL;
for (i = 0; http_hdr_strings[i] != NULL; i++) {
- ASSERT(strlen(http_hdr_strings[i]) <= HTTP_MAX_NAME_LEN);
+ ASSERT(sys_strlen(http_hdr_strings[i]) <= HTTP_MAX_NAME_LEN);
http_hdr_table[i].index = i;
http_hash_insert(http_hdr_strings[i],
&http_hdr_table[i],
@@ -516,7 +516,7 @@ static http_atom_t* http_hash_lookup(const char* name, int len,
while (ap != NULL) {
if ((ap->h == h) && (ap->len == len) &&
- (strncmp(ap->name, name, len) == 0))
+ (sys_strncmp(ap->name, name, len) == 0))
return ap;
ap = ap->next;
}
@@ -656,7 +656,7 @@ int packet_parse_http(const char* buf, int len, int* statep,
if (*statep == 0) {
/* start-line = Request-Line | Status-Line */
- if (n >= 5 && (strncmp(buf, "HTTP/", 5) == 0)) {
+ if (n >= 5 && (sys_strncmp(buf, "HTTP/", 5) == 0)) {
int major = 0;
int minor = 0;
int status = 0;
@@ -750,7 +750,7 @@ int packet_parse_http(const char* buf, int len, int* statep,
}
if (n < 8)
return -1;
- if (strncmp(ptr, "HTTP/", 5) != 0)
+ if (sys_strncmp(ptr, "HTTP/", 5) != 0)
return -1;
ptr += 5;
n -= 5;
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index ac7096745e..c7e02c6d48 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,16 +38,15 @@ static Hash process_reg;
#define REG_HASH(term) ((HashValue) atom_val(term))
-static erts_smp_rwmtx_t regtab_rwmtx;
+static erts_rwmtx_t regtab_rwmtx;
-#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
-#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
-#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
-#define reg_write_lock() erts_smp_rwmtx_rwlock(&regtab_rwmtx)
-#define reg_read_unlock() erts_smp_rwmtx_runlock(&regtab_rwmtx)
-#define reg_write_unlock() erts_smp_rwmtx_rwunlock(&regtab_rwmtx)
+#define reg_try_read_lock() erts_rwmtx_tryrlock(&regtab_rwmtx)
+#define reg_try_write_lock() erts_rwmtx_tryrwlock(&regtab_rwmtx)
+#define reg_read_lock() erts_rwmtx_rlock(&regtab_rwmtx)
+#define reg_write_lock() erts_rwmtx_rwlock(&regtab_rwmtx)
+#define reg_read_unlock() erts_rwmtx_runlock(&regtab_rwmtx)
+#define reg_write_unlock() erts_rwmtx_rwunlock(&regtab_rwmtx)
-#ifdef ERTS_SMP
static ERTS_INLINE void
reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
{
@@ -64,7 +63,7 @@ reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
}
/* Release process locks in order to avoid deadlock */
- erts_smp_proc_unlock(c_p, *c_p_locks);
+ erts_proc_unlock(c_p, *c_p_locks);
*c_p_locks = 0;
}
@@ -87,14 +86,13 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
}
/* Release process locks in order to avoid deadlock */
- erts_smp_proc_unlock(c_p, *c_p_locks);
+ erts_proc_unlock(c_p, *c_p_locks);
*c_p_locks = 0;
}
reg_write_lock();
}
-#endif
static ERTS_INLINE int
is_proc_alive(Process *p)
@@ -102,7 +100,7 @@ is_proc_alive(Process *p)
return !ERTS_PROC_IS_EXITING(p);
}
-void register_info(int to, void *to_arg)
+void register_info(fmtfn_t to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
@@ -141,11 +139,12 @@ static void reg_free(RegProc *obj)
void init_register_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
+ erts_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
@@ -174,7 +173,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
Process *proc = NULL;
Port *port = NULL;
RegProc r, *rp;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
if (is_not_atom(name) || name == am_undefined)
return res;
@@ -184,7 +183,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
else {
if (is_not_internal_pid(id) && is_not_internal_port(id))
return res;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (is_internal_port(id)) {
port = erts_id2port(id);
if (!port)
@@ -192,15 +191,13 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
}
}
-#ifdef ERTS_SMP
{
ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
reg_safe_write_lock(proc, &proc_locks);
if (proc && !proc_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
if (is_internal_pid(id)) {
if (!proc)
@@ -214,7 +211,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
}
else {
ASSERT(!INVALID_PORT(port, id));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
r.pt = port;
if (r.pt->common.u.alive.reg)
goto done;
@@ -249,8 +246,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
erts_port_release(port);
if (c_p != proc) {
if (proc)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
return res;
}
@@ -271,17 +268,15 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
- ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
-#endif
-
+ ErtsProcLocks c_p_locks = 0;
+ if (c_p) {
+ c_p_locks = ERTS_PROC_LOCK_MAIN;
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ }
reg_safe_read_lock(c_p, &c_p_locks);
+
if (c_p && !c_p_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -330,7 +325,6 @@ erts_whereis_name(Process *c_p,
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
ErtsProcLocks current_c_p_locks;
Port *pending_port = NULL;
@@ -347,7 +341,6 @@ erts_whereis_name(Process *c_p,
* - read reg lock
* - current_c_p_locks (either c_p_locks or 0) on c_p
*/
-#endif
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -369,7 +362,6 @@ erts_whereis_name(Process *c_p,
if (!rp)
*proc = NULL;
else {
-#ifdef ERTS_SMP
if (!rp->p)
*proc = NULL;
else {
@@ -386,17 +378,10 @@ erts_whereis_name(Process *c_p,
*proc = rp->p;
else {
if (need_locks)
- erts_smp_proc_unlock(rp->p, need_locks);
+ erts_proc_unlock(rp->p, need_locks);
*proc = NULL;
}
}
-#else
- if (rp->p
- && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)))
- *proc = rp->p;
- else
- *proc = NULL;
-#endif
if (*proc && (flags & ERTS_P2P_FLG_INC_REFC))
erts_proc_inc_refc(*proc);
}
@@ -406,7 +391,6 @@ erts_whereis_name(Process *c_p,
if (!rp || !rp->pt)
*port = NULL;
else {
-#ifdef ERTS_SMP
if (lock_port) {
if (pending_port == rp->pt)
pending_port = NULL;
@@ -418,11 +402,11 @@ erts_whereis_name(Process *c_p,
pending_port = NULL;
}
- if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ if (erts_port_trylock(rp->pt) == EBUSY) {
Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
+ erts_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_read_unlock();
@@ -430,19 +414,16 @@ erts_whereis_name(Process *c_p,
goto restart;
}
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
}
-#endif
*port = rp->pt;
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks)
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
if (pending_port)
erts_port_release(pending_port);
-#endif
reg_read_unlock();
}
@@ -475,7 +456,6 @@ int erts_unregister_name(Process *c_p,
RegProc r, *rp;
Port *port = c_prt;
ErtsProcLocks current_c_p_locks = 0;
-#ifdef ERTS_SMP
/*
* SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
@@ -491,18 +471,15 @@ int erts_unregister_name(Process *c_p,
restart:
reg_safe_write_lock(c_p, &current_c_p_locks);
-#endif
r.name = name;
if (is_non_value(name)) {
/* Unregister current process name */
ASSERT(c_p);
-#ifdef ERTS_SMP
if (current_c_p_locks != c_p_locks) {
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
current_c_p_locks = c_p_locks;
}
-#endif
if (c_p->common.u.alive.reg) {
r.name = c_p->common.u.alive.reg->name;
} else {
@@ -515,36 +492,34 @@ int erts_unregister_name(Process *c_p,
if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
if (rp->pt) {
if (port != rp->pt) {
-#ifdef ERTS_SMP
if (port) {
ASSERT(port != c_prt);
erts_port_release(port);
port = NULL;
}
- if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ if (erts_port_trylock(rp->pt) == EBUSY) {
Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
+ erts_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_write_unlock();
port = erts_id2port(id);
goto restart;
}
-#endif
port = rp->pt;
}
ASSERT(rp->pt == port);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(port));
rp->pt->common.u.alive.reg = NULL;
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
+ erts_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
trace_port(port, am_unregister, r.name);
@@ -552,7 +527,6 @@ int erts_unregister_name(Process *c_p,
} else if (rp->p) {
-#ifdef ERTS_SMP
erts_proc_safelock(c_p,
current_c_p_locks,
c_p_locks,
@@ -560,17 +534,14 @@ int erts_unregister_name(Process *c_p,
(c_p == rp->p) ? current_c_p_locks : 0,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
-#endif
rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
trace_proc(rp->p, (c_p == rp->p) ? c_p_locks : ERTS_PROC_LOCK_MAIN,
rp->p, am_unregister, r.name);
}
-#ifdef ERTS_SMP
if (rp->p != c_p) {
- erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
hash_erase(&process_reg, (void*) &r);
res = 1;
@@ -584,14 +555,12 @@ int erts_unregister_name(Process *c_p,
erts_port_release(port);
}
if (c_prt) {
- erts_smp_port_lock(c_prt);
+ erts_port_lock(c_prt);
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks) {
- erts_smp_proc_lock(c_p, c_p_locks);
+ erts_proc_lock(c_p, c_p_locks);
}
-#endif
return res;
}
@@ -632,14 +601,12 @@ BIF_RETTYPE registered_0(BIF_ALIST_0)
Uint need;
Eterm* hp;
HashBucket **bucket;
-#ifdef ERTS_SMP
ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
reg_safe_read_lock(BIF_P, &proc_locks);
if (!proc_locks)
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
bucket = process_reg.bucket;
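
The reg_safe_read_lock()/reg_safe_write_lock() dance above follows a common deadlock-avoidance pattern: try the table lock without blocking; if that fails, release the process locks (reporting this through *c_p_locks = 0) before blocking on the table lock, and let the caller re-acquire its locks afterwards. A minimal sketch of the same pattern in plain pthreads; all names and types here are illustrative, not the ERTS API:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t main_lock;   /* stands in for ERTS_PROC_LOCK_MAIN */
    } proc_t;

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Returns 1 if the caller still holds proc->main_lock on return,
     * 0 if it was released and must be re-acquired by the caller. */
    static int safe_read_lock(proc_t *proc, int holds_main_lock)
    {
        if (pthread_rwlock_tryrdlock(&table_lock) == 0)
            return holds_main_lock;          /* fast path: nothing dropped */

        if (holds_main_lock) {
            /* Drop the process lock before blocking, so we never wait on
             * the table lock while holding a lock that the table-lock
             * owner might need: the same reason erts_proc_unlock() is
             * called above before blocking in reg_read_lock(). */
            pthread_mutex_unlock(&proc->main_lock);
            holds_main_lock = 0;
        }
        pthread_rwlock_rdlock(&table_lock);
        return holds_main_lock;
    }

Returning whether the main lock survived plays the same role as clearing *c_p_locks: the caller knows it must re-take its own lock once the table lock is held.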
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index d839f55d6b..27a314ca78 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -44,7 +44,7 @@ typedef struct reg_proc
int process_reg_size(void);
int process_reg_sz(void);
void init_register_table(void);
-void register_info(int, void *);
+void register_info(fmtfn_t, void *);
int erts_register_name(Process *, Eterm, Eterm);
Eterm erts_whereis_name_to_id(Process *, Eterm);
void erts_whereis_name(Process *, ErtsProcLocks,
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 30b26a7296..0d816a81a9 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -62,7 +62,7 @@ static ERTS_INLINE int align_up_pow2(int val)
*/
static void rehash(SafeHash* h, int grow_limit)
{
- if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
+ if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
return; /* already in progress */
}
if (h->grow_limit == grow_limit) {
@@ -77,7 +77,7 @@ static void rehash(SafeHash* h, int grow_limit)
sys_memzero(new_tab, bytes);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
- erts_smp_mtx_lock(&h->lock_vec[i].mtx);
+ erts_mtx_lock(&h->lock_vec[i].mtx);
}
h->tab = new_tab;
@@ -95,12 +95,12 @@ static void rehash(SafeHash* h, int grow_limit)
}
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
+ erts_mtx_unlock(&h->lock_vec[i].mtx);
}
erts_free(h->type, (void *) old_tab);
}
/*else already done */
- erts_smp_atomic_set_relb(&h->is_rehashing, 0);
+ erts_atomic_set_relb(&h->is_rehashing, 0);
}
@@ -115,7 +115,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
int objects = 0;
for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) {
- erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx);
+ erts_mtx_lock(&h->lock_vec[lock_ix].mtx);
size = h->size_mask + 1;
for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) {
int depth = 0;
@@ -128,7 +128,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
if (depth > max_depth)
max_depth = depth;
}
- erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx);
+ erts_mtx_unlock(&h->lock_vec[lock_ix].mtx);
}
hi->name = h->name;
@@ -145,9 +145,9 @@ int safe_hash_table_sz(SafeHash *h)
int i, size;
for(i=0; h->name[i]; i++);
i++;
- erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
+ erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
size = h->size_mask + 1;
- erts_smp_mtx_unlock(&h->lock_vec[0].mtx);
+ erts_mtx_unlock(&h->lock_vec[0].mtx);
return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i;
}
@@ -155,7 +155,8 @@ int safe_hash_table_sz(SafeHash *h)
** Init a preallocated or static hash structure
** and allocate buckets. NOT SAFE
*/
-SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
+ int size, SafeHashFunctions fun)
{
int i, bytes;
@@ -167,10 +168,11 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
h->name = name;
h->fun = fun;
set_size(h,size);
- erts_smp_atomic_init_nob(&h->is_rehashing, 0);
- erts_smp_atomic_init_nob(&h->nitems, 0);
+ erts_atomic_init_nob(&h->is_rehashing, 0);
+ erts_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ erts_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL,
+ flags);
}
return h;
}
@@ -183,8 +185,8 @@ void* safe_hash_get(SafeHash* h, void* tmpl)
{
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
b = h->tab[hval & h->size_mask];
while(b != NULL) {
@@ -192,7 +194,7 @@ void* safe_hash_get(SafeHash* h, void* tmpl)
break;
b = b->next;
}
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return (void*) b;
}
@@ -205,13 +207,13 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
SafeHashBucket** head;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
head = &h->tab[hval & h->size_mask];
b = *head;
while(b != NULL) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return b;
}
b = b->next;
@@ -222,8 +224,8 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
b->next = *head;
*head = b;
grow_limit = h->grow_limit;
- erts_smp_mtx_unlock(lock);
- if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) {
+ erts_mtx_unlock(lock);
+ if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) {
rehash(h, grow_limit);
}
return (void*) b;
@@ -238,40 +240,58 @@ void* safe_hash_erase(SafeHash* h, void* tmpl)
SafeHashValue hval = h->fun.hash(tmpl);
SafeHashBucket* b;
SafeHashBucket** prevp;
- erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
- erts_smp_mtx_lock(lock);
+ erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_mtx_lock(lock);
prevp = &h->tab[hval & h->size_mask];
b = *prevp;
while(b != NULL) {
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
*prevp = b->next;
- erts_smp_mtx_unlock(lock);
- erts_smp_atomic_dec_nob(&h->nitems);
+ erts_mtx_unlock(lock);
+ erts_atomic_dec_nob(&h->nitems);
h->fun.free((void*)b);
return tmpl;
}
prevp = &b->next;
b = b->next;
}
- erts_smp_mtx_unlock(lock);
+ erts_mtx_unlock(lock);
return NULL;
}
/*
-** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!!
+** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!!
*/
-void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2)
+void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *, void *),
+ void *func_arg2, void *func_arg3)
{
int i;
for (i = 0; i <= h->size_mask; i++) {
SafeHashBucket* b = h->tab[i];
while (b != NULL) {
- (*func)((void *) b, func_arg2);
+ (*func)((void *) b, func_arg2, func_arg3);
b = b->next;
}
}
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
+ int i;
+
+ for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) {
+ erts_mtx_t *lock = &h->lock_vec[i].mtx;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL,
+ ERTS_LOCK_TYPE_MUTEX | flags);
+ } else {
+ erts_lcnt_uninstall(&lock->lcnt);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
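
safe_hash avoids a single global lock by striping the table over SAFE_HASH_LOCK_CNT mutexes: the stripe is chosen by hval % SAFE_HASH_LOCK_CNT while the bucket is chosen by hval & size_mask, so rehashing to a larger table never changes which lock guards which slot, and rehash() can stop all traffic simply by taking every stripe. A rough sketch of the lookup side; the layout is simplified, not the real SafeHash, and the mutexes are assumed initialized elsewhere:

    #include <pthread.h>
    #include <stddef.h>

    #define LOCK_CNT 16                     /* cf. SAFE_HASH_LOCK_CNT */

    struct bucket { struct bucket *next; unsigned long hval; };

    struct striped_hash {
        struct bucket **tab;                /* 2^n slots */
        unsigned long size_mask;            /* slots - 1 */
        pthread_mutex_t locks[LOCK_CNT];
    };

    static struct bucket *lookup(struct striped_hash *h, unsigned long hval,
                                 int (*eq)(const struct bucket *))
    {
        /* Because the table size is a power of two that is at least
         * LOCK_CNT, slot i is always guarded by lock i % LOCK_CNT,
         * no matter how large the table grows. */
        pthread_mutex_t *lock = &h->locks[hval % LOCK_CNT];
        struct bucket *b;

        pthread_mutex_lock(lock);
        for (b = h->tab[hval & h->size_mask]; b != NULL; b = b->next)
            if (b->hval == hval && eq(b))
                break;
        pthread_mutex_unlock(lock);
        return b;
    }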
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index 285103cb17..bd81e3022b 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_lock_flags.h"
typedef unsigned long SafeHashValue;
@@ -72,11 +73,11 @@ typedef struct
int size_mask; /* (RW) Number of slots - 1 */
SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */
int grow_limit; /* (RW) Threshold for growing table */
- erts_smp_atomic_t nitems; /* (A) Number of items in table */
- erts_smp_atomic_t is_rehashing; /* (A) Table rehashing in progress */
+ erts_atomic_t nitems; /* (A) Number of items in table */
+ erts_atomic_t is_rehashing; /* (A) Table rehashing in progress */
union {
- erts_smp_mtx_t mtx;
+ erts_mtx_t mtx;
byte __cache_line__[64];
}lock_vec[SAFE_HASH_LOCK_CNT];
@@ -85,7 +86,7 @@ typedef struct
/* A: Lockless atomics */
} SafeHash;
-SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, erts_lock_flags_t, int, SafeHashFunctions);
void safe_hash_get_info(SafeHashInfo*, SafeHash*);
int safe_hash_table_sz(SafeHash *);
@@ -94,7 +95,11 @@ void* safe_hash_get(SafeHash*, void*);
void* safe_hash_put(SafeHash*, void*);
void* safe_hash_erase(SafeHash*, void*);
-void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+void safe_hash_for_each(SafeHash*, void (*func)(void *, void *, void *), void *, void *);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
+#endif
#endif /* __SAFE_HASH_H__ */
diff --git a/erts/emulator/beam/select_instrs.tab b/erts/emulator/beam/select_instrs.tab
new file mode 100644
index 0000000000..2951949d38
--- /dev/null
+++ b/erts/emulator/beam/select_instrs.tab
@@ -0,0 +1,190 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+i_select_val_bins := select_val_bins.fetch.select;
+
+select_val_bins.head() {
+ Eterm select_val;
+}
+
+select_val_bins.fetch(Src) {
+ select_val = $Src;
+}
+
+select_val_bins.select(Fail, NumElements) {
+ struct Singleton {
+ BeamInstr val;
+ };
+ struct Singleton* low;
+ struct Singleton* high;
+ struct Singleton* mid;
+ int bdiff; /* int not long because the arrays aren't that large */
+
+ low = (struct Singleton *) ($NEXT_INSTRUCTION);
+ high = low + $NumElements;
+
+ /* The pointer subtraction (high-low) below must produce
+ * a signed result, because high could be < low. That
+ * requires the compiler to insert quite a bit of code.
+ *
+ * However, high will be > low so the result will be
+ * positive. We can use that knowledge to optimise the
+ * entire sequence, from the initial comparison to the
+ * computation of mid.
+ *
+ * -- Mikael Pettersson, Acumem AB
+ *
+ * Original loop control code:
+ *
+ * while (low < high) {
+ * mid = low + (high-low) / 2;
+ *
+ */
+ while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
+ unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Singleton)-1);
+
+ mid = (struct Singleton*)((char*)low + boffset);
+ if (select_val < mid->val) {
+ high = mid;
+ } else if (select_val > mid->val) {
+ low = mid + 1;
+ } else {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $NumElements);
+ Sint32 offset = jump_tab[mid - (struct Singleton *)($NEXT_INSTRUCTION)];
+ $JUMP(offset);
+ }
+ }
+ $JUMP($Fail);
+}
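
The loop above implements Pettersson's variant of binary search: instead of the signed element-count expression low + (high-low)/2, it computes an unsigned byte offset, halves it with a shift, and masks it down to an element boundary, sparing the compiler the sign-correcting division. The same idea in isolation, over a plain sorted array; an illustrative sketch that assumes the array's byte size fits in an int, as the instruction arrays here do:

    #include <stddef.h>

    /* Binary search using byte offsets instead of signed element counts.
     * Returns the index of 'key' in the sorted array, or -1. */
    static long bsearch_byteoff(const long *base, size_t n, long key)
    {
        const long *low = base;
        const long *high = base + n;
        int bdiff;

        while ((bdiff = (int)((const char *)high - (const char *)low)) > 0) {
            /* Half the byte difference, rounded down to a multiple of
             * the element size; always a valid offset below 'high'. */
            unsigned int boffset =
                ((unsigned int)bdiff >> 1) & ~(sizeof(long) - 1);
            const long *mid = (const long *)((const char *)low + boffset);

            if (key < *mid)
                high = mid;
            else if (key > *mid)
                low = mid + 1;
            else
                return (long)(mid - base);
        }
        return -1;
    }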
+
+i_select_tuple_arity2 := select_val2.src.get_arity.execute;
+i_select_val2 := select_val2.src.execute;
+
+select_val2.head() {
+ Eterm select_val2;
+}
+
+select_val2.src(Src) {
+ select_val2 = $Src;
+}
+
+select_val2.get_arity() {
+ if (ERTS_LIKELY(is_tuple(select_val2))) {
+ select_val2 = *tuple_val(select_val2);
+ } else {
+ select_val2 = NIL;
+ }
+}
+
+select_val2.execute(Fail, T1, T2) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION);
+
+ if (select_val2 == $T1) {
+ $JUMP(jump_tab[0]);
+ } else if (select_val2 == $T2) {
+ $JUMP(jump_tab[1]);
+ } else {
+ $FAIL($Fail);
+ }
+}
+
+i_select_tuple_arity := select_val_lin.fetch.get_arity.execute;
+i_select_val_lins := select_val_lin.fetch.execute;
+
+select_val_lin.head() {
+ Eterm select_val;
+}
+
+select_val_lin.fetch(Src) {
+ select_val = $Src;
+}
+
+select_val_lin.get_arity() {
+ if (ERTS_LIKELY(is_tuple(select_val))) {
+ select_val = *tuple_val(select_val);
+ } else {
+ select_val = NIL;
+ }
+}
+
+select_val_lin.execute(Fail, N) {
+ BeamInstr* vs = $NEXT_INSTRUCTION;
+ int ix = 0;
+
+ for (;;) {
+ if (vs[ix+0] >= select_val) {
+ ix += 0;
+ break;
+ }
+ if (vs[ix+1] >= select_val) {
+ ix += 1;
+ break;
+ }
+ ix += 2;
+ }
+
+ if (vs[ix] == select_val) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $N);
+ Eterm offset = jump_tab[ix];
+ $JUMP(offset);
+ } else {
+ $JUMP($Fail);
+ }
+}
+
+JUMP_ON_VAL(Fail, Index, N, Base) {
+ if (is_small($Index)) {
+ $Index = (Uint) (signed_val($Index) - $Base);
+ if ($Index < $N) {
+ Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION);
+ $JUMP(jump_tab[$Index]);
+ }
+ }
+ $FAIL($Fail);
+}
+
+i_jump_on_val_zero := jump_on_val_zero.fetch.execute;
+
+jump_on_val_zero.head() {
+ Eterm index;
+}
+
+jump_on_val_zero.fetch(Src) {
+ index = $Src;
+}
+
+jump_on_val_zero.execute(Fail, N) {
+ $JUMP_ON_VAL($Fail, index, $N, 0);
+}
+
+i_jump_on_val := jump_on_val.fetch.execute;
+
+jump_on_val.head() {
+ Eterm index;
+}
+
+jump_on_val.fetch(Src) {
+ index = $Src;
+}
+
+jump_on_val.execute(Fail, N, Base) {
+ $JUMP_ON_VAL($Fail, index, $N, $Base);
+}
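
JUMP_ON_VAL above also relies on the classic single-comparison range check: after casting signed_val($Index) - $Base to an unsigned type, any value below the base wraps around to a huge number, so one < $N test covers both bounds. Standalone, as an illustrative sketch that assumes the subtraction itself cannot overflow:

    #include <stdint.h>

    /* Returns nonzero iff base <= v < base + n, using one comparison.
     * Values below 'base' wrap to large unsigned numbers and fail. */
    static int in_range(int64_t v, int64_t base, uint64_t n)
    {
        return (uint64_t)(v - base) < n;
    }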
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index dfe82cab44..869a575cb4 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
#ifndef __SYS_H__
#define __SYS_H__
-#if !defined(__GNUC__)
+#if !defined(__GNUC__) || defined(__e2k__)
# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
#elif !defined(__GNUC_MINOR__)
# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
@@ -34,9 +34,6 @@
(((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
#endif
-#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP)
-# error "Dirty schedulers not supported without smp support"
-#endif
#ifdef ERTS_INLINE
# ifndef ERTS_CAN_INLINE
@@ -99,6 +96,12 @@
#define ErtsContainerStruct(ptr, type, member) \
((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
+/* Use this variant when the member is an array */
+#define ErtsContainerStruct_(ptr, type, memberv) \
+ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv)))
+
+#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member)
+
#if defined (__WIN32__)
# include "erl_win_sys.h"
#else
@@ -215,12 +218,6 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
# define ASSERT(e) ((void) 1)
#endif
-#ifdef ERTS_SMP
-# define ERTS_SMP_ASSERT(e) ASSERT(e)
-#else
-# define ERTS_SMP_ASSERT(e) ((void)1)
-#endif
-
/* ERTS_UNDEF can be used to silence false warnings about
* "variable may be used uninitialized" while keeping the variable
* marked as undefined by valgrind.
@@ -321,33 +318,36 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
#endif
#if SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long Eterm;
-typedef unsigned long Uint;
-typedef long Sint;
+typedef unsigned long Eterm erts_align_attribute(sizeof(long));
+typedef unsigned long Uint erts_align_attribute(sizeof(long));
+typedef long Sint erts_align_attribute(sizeof(long));
#define SWORD_CONSTANT(Const) Const##L
#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_UWORD_MAX ULONG_MAX
#define ERTS_SWORD_MAX LONG_MAX
+#define ERTS_SWORD_MIN LONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int Eterm;
-typedef unsigned int Uint;
-typedef int Sint;
+typedef unsigned int Eterm erts_align_attribute(sizeof(int));
+typedef unsigned int Uint erts_align_attribute(sizeof(int));
+typedef int Sint erts_align_attribute(sizeof(int));
#define SWORD_CONSTANT(Const) Const
#define UWORD_CONSTANT(Const) Const##U
#define ERTS_UWORD_MAX UINT_MAX
#define ERTS_SWORD_MAX INT_MAX
+#define ERTS_SWORD_MIN INT_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_INT
#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
-typedef unsigned long long Eterm;
-typedef unsigned long long Uint;
-typedef long long Sint;
+typedef unsigned long long Eterm erts_align_attribute(sizeof(long long));
+typedef unsigned long long Uint erts_align_attribute(sizeof(long long));
+typedef long long Sint erts_align_attribute(sizeof(long long));
#define SWORD_CONSTANT(Const) Const##LL
#define UWORD_CONSTANT(Const) Const##ULL
#define ERTS_UWORD_MAX ULLONG_MAX
#define ERTS_SWORD_MAX LLONG_MAX
+#define ERTS_SWORD_MIN LLONG_MIN
#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
#if defined(__WIN32__)
#define ErtsStrToSint _strtoi64
@@ -369,28 +369,12 @@ typedef UWord BeamInstr;
# define HAVE_INT64 1
typedef unsigned long Uint64;
typedef long Sint64;
-# ifdef ULONG_MAX
-# define ERTS_UINT64_MAX ULONG_MAX
-# endif
-# ifdef LONG_MAX
-# define ERTS_SINT64_MAX LONG_MAX
-# endif
-# ifdef LONG_MIN
-# define ERTS_SINT64_MIN LONG_MIN
-# endif
+# define ErtsStrToSint64 strtol
# elif SIZEOF_LONG_LONG == 8
# define HAVE_INT64 1
typedef unsigned long long Uint64;
typedef long long Sint64;
-# ifdef ULLONG_MAX
-# define ERTS_UINT64_MAX ULLONG_MAX
-# endif
-# ifdef LLONG_MAX
-# define ERTS_SINT64_MAX LLONG_MAX
-# endif
-# ifdef LLONG_MIN
-# define ERTS_SINT64_MIN LLONG_MIN
-# endif
+# define ErtsStrToSint64 strtoll
# else
# error "No 64-bit integer type found"
# endif
@@ -403,7 +387,7 @@ typedef long long Sint64;
# define ERTS_SINT64_MAX ((Sint64) ((((Uint64) 1) << 63)-1))
#endif
#ifndef ERTS_SINT64_MIN
-# define ERTS_SINT64_MIN (-1*(((Sint64) 1) << 63))
+# define ERTS_SINT64_MIN ((Sint64) ((((Uint64) 1) << 63)))
#endif
#if SIZEOF_LONG == 4
@@ -416,6 +400,16 @@ typedef int Sint32;
#error Found no appropriate type to use for 'Uint32' and 'Sint32'
#endif
+#ifndef ERTS_UINT32_MAX
+# define ERTS_UINT32_MAX (~((Uint32) 0))
+#endif
+#ifndef ERTS_SINT32_MAX
+# define ERTS_SINT32_MAX ((Sint32) ((((Uint32) 1) << 31)-1))
+#endif
+#ifndef ERTS_SINT32_MIN
+# define ERTS_SINT32_MIN ((Sint32) ((((Uint32) 1) << 31)))
+#endif
+
#if SIZEOF_INT == 2
typedef unsigned int Uint16;
typedef int Sint16;
@@ -426,6 +420,16 @@ typedef short Sint16;
#error Found no appropriate type to use for 'Uint16' and 'Sint16'
#endif
+#ifndef ERTS_UINT16_MAX
+# define ERTS_UINT16_MAX (~((Uint16) 0))
+#endif
+#ifndef ERTS_SINT16_MAX
+# define ERTS_SINT16_MAX ((Sint16) ((((Uint16) 1) << 15)-1))
+#endif
+#ifndef ERTS_SINT16_MIN
+# define ERTS_SINT16_MIN ((Sint16) ((((Uint16) 1) << 15)))
+#endif
+
#if CHAR_BIT == 8
typedef unsigned char byte;
#else
@@ -462,49 +466,25 @@ typedef union {
#include "erl_lock_check.h"
-/* needed by erl_smp.h */
+/* needed by erl_threads.h */
int erts_send_warning_to_logger_str_nogl(char *);
-#include "erl_smp.h"
+#include "erl_threads.h"
#ifdef ERTS_WANT_BREAK_HANDLING
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_break_requested;
+extern erts_atomic32_t erts_break_requested;
# define ERTS_BREAK_REQUESTED \
- ((int) erts_smp_atomic32_read_nob(&erts_break_requested))
-# else
-extern volatile int erts_break_requested;
-# define ERTS_BREAK_REQUESTED erts_break_requested
-# endif
+ ((int) erts_atomic32_read_nob(&erts_break_requested))
void erts_do_break_handling(void);
#endif
-#ifdef ERTS_WANT_GOT_SIGUSR1
-# ifndef UNIX
-# define ERTS_GOT_SIGUSR1 0
-# else
-# ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
-# else
-extern volatile int erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
-# endif
-# endif
-#endif
-#ifdef ERTS_SMP
-extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
+extern erts_atomic32_t erts_writing_erl_crash_dump;
extern erts_tsd_key_t erts_is_crash_dumping_key;
#define ERTS_SOMEONE_IS_CRASH_DUMPING \
- ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump))
+ ((int) erts_atomic32_read_mb(&erts_writing_erl_crash_dump))
#define ERTS_IS_CRASH_DUMPING \
((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key))
-#else
-extern volatile int erts_writing_erl_crash_dump;
-#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#endif
/* Deal with memcpy() vs bcopy() etc. We want to use the mem*() functions,
but be able to fall back on bcopy() etc on systems that don't have
@@ -600,7 +580,7 @@ __decl_noreturn void __noreturn erts_exit(int n, char*, ...);
erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
__FILE__, __LINE__, __func__, What)
-Eterm erts_check_io_info(void *p);
+UWord erts_sys_get_page_size(void);
/* Size of misc memory allocated from system dependent code */
Uint erts_sys_misc_mem_sz(void);
@@ -610,22 +590,21 @@ Uint erts_sys_misc_mem_sz(void);
#include "erl_printf.h"
/* Io constants to erts_print and erts_putc */
-#define ERTS_PRINT_STDERR (2)
-#define ERTS_PRINT_STDOUT (1)
-#define ERTS_PRINT_FILE (-1)
-#define ERTS_PRINT_SBUF (-2)
-#define ERTS_PRINT_SNBUF (-3)
-#define ERTS_PRINT_DSBUF (-4)
-
-#define ERTS_PRINT_MIN ERTS_PRINT_DSBUF
+#define ERTS_PRINT_STDERR ((fmtfn_t)0)
+#define ERTS_PRINT_STDOUT ((fmtfn_t)1)
+#define ERTS_PRINT_FILE ((fmtfn_t)2)
+#define ERTS_PRINT_SBUF ((fmtfn_t)3)
+#define ERTS_PRINT_SNBUF ((fmtfn_t)4)
+#define ERTS_PRINT_DSBUF ((fmtfn_t)5)
+#define ERTS_PRINT_FD ((fmtfn_t)6)
typedef struct {
char *buf;
size_t size;
} erts_print_sn_buf;
-int erts_print(int to, void *arg, char *format, ...); /* in utils.c */
-int erts_putc(int to, void *arg, char); /* in utils.c */
+int erts_print(fmtfn_t to, void *arg, char *format, ...); /* in utils.c */
+int erts_putc(fmtfn_t to, void *arg, char); /* in utils.c */
/* logger stuff is declared here instead of in global.h, so sys files
won't have to include global.h */
@@ -642,7 +621,7 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_info_to_logger_str_nogl(char *);
-/* needed by erl_smp.h (declared above)
+/* needed by erl_threads.h (declared above)
int erts_send_warning_to_logger_str_nogl(char *); */
int erts_send_error_to_logger_str_nogl(char *);
@@ -662,6 +641,8 @@ typedef struct preload {
*/
typedef Eterm ErtsTracer;
+#include "erl_osenv.h"
+
/*
* This structure contains options to all built in drivers.
* None of the drivers use all of the fields.
@@ -677,8 +658,7 @@ typedef struct _SysDriverOpts {
int hide_window; /* Hide this windows (Windows). */
int exit_status; /* Report exit status of subprocess. */
int overlapped_io; /* Only has effect on windows NT et al */
- char *envir; /* Environment of the port process, */
- /* in Windows format. */
+ erts_osenv_t envir; /* Environment of the port process */
char **argv; /* Argument vector in Unix'ish format. */
char *wd; /* Working directory. */
unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER |
@@ -763,11 +743,7 @@ extern char *erts_sys_ddll_error(int code);
/*
* System interfaces for startup.
*/
-void erts_sys_schedule_interrupt(int set);
-#ifdef ERTS_SMP
-void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime);
void erts_sys_main_thread(void);
-#endif
extern int erts_sys_prepare_crash_dump(int secs);
extern void erts_sys_pre_init(void);
@@ -783,10 +759,6 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff);
-void wall_clock_elapsed_time_both(UWord *ms_total,
- UWord *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
@@ -816,15 +788,12 @@ void set_break_quit(void (*)(void), void (*)(void));
void os_flavor(char*, unsigned);
void os_version(int*, int*, int*);
-void init_getenv_state(GETENV_STATE *);
-char * getenv_string(GETENV_STATE *);
-void fini_getenv_state(GETENV_STATE *);
#define HAVE_ERTS_CHECK_IO_DEBUG
typedef struct {
int no_used_fds;
int no_driver_select_structs;
- int no_driver_event_structs;
+ int no_enif_select_structs;
} ErtsCheckIoDebugInfo;
int erts_check_io_debug(ErtsCheckIoDebugInfo *ip);
@@ -839,29 +808,36 @@ int sys_double_to_chars_ext(double, char*, size_t, size_t);
int sys_double_to_chars_fast(double, char*, int, int, int);
void sys_get_pid(char *, size_t);
-/* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */
-int erts_sys_putenv(char *key, char *value);
-/* Simple variant used from drivers, raw eightbit interface */
-int erts_sys_putenv_raw(char *key, char *value);
-/* erts_sys_getenv() returns 0 on success (length of value string in
- *size), a value > 0 if value buffer is too small (*size is set to needed
- size), and a value < 0 on failure. */
-int erts_sys_getenv(char *key, char *value, size_t *size);
-/* Simple variant used from drivers, raw eightbit interface */
-int erts_sys_getenv_raw(char *key, char *value, size_t *size);
-/* erts_sys_getenv__() is only allowed to be used in early init phase */
-int erts_sys_getenv__(char *key, char *value, size_t *size);
-/* erst_sys_unsetenv() returns 0 on success and a value != 0 on failure. */
-int erts_sys_unsetenv(char *key);
+/* erl_drv_get/putenv have been implicitly 8-bit for so long that we can't
+ * change them without breaking things on Windows. Their return values are
+ * identical to erts_osenv_get/putenv */
+int erts_sys_explicit_8bit_getenv(char *key, char *value, size_t *size);
+int erts_sys_explicit_8bit_putenv(char *key, char *value);
+
+/* This is identical to erts_sys_explicit_8bit_getenv but falls through to the
+ * host OS implementation instead of erts_osenv. */
+int erts_sys_explicit_host_getenv(char *key, char *value, size_t *size);
+
+const erts_osenv_t *erts_sys_rlock_global_osenv(void);
+void erts_sys_runlock_global_osenv(void);
+
+erts_osenv_t *erts_sys_rwlock_global_osenv(void);
+void erts_sys_rwunlock_global_osenv(void);
/* Easier to use, but not as efficient, environment functions */
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
-#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
+#if defined(ERTS_THR_HAVE_SIG_FUNCS) && \
+ (!defined(ETHR_UNUSABLE_SIGUSRX) || defined(SIGRTMIN))
extern void sys_thr_resume(erts_tid_t tid);
extern void sys_thr_suspend(erts_tid_t tid);
-#endif
+#ifdef SIGRTMIN
+#define ERTS_SYS_SUSPEND_SIGNAL (SIGRTMIN+1)
+#else
+#define ERTS_SYS_SUSPEND_SIGNAL (SIGUSR2)
+#endif /* SIGRTMIN */
+#endif /* HAVE_SIG_FUNCS */
/* utils.c */
@@ -893,10 +869,13 @@ void sys_alloc_stat(SysAllocStat *);
#define ERTS_REFC_DEBUG
#endif
-typedef erts_smp_atomic_t erts_refc_t;
+typedef erts_atomic_t erts_refc_t;
ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val);
ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
erts_aint_t min_val);
ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
@@ -912,27 +891,51 @@ ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
ERTS_GLB_INLINE void
erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
- erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
+ erts_atomic_init_nob((erts_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_inc_nob((erts_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
+erts_refc_inc_unless(erts_refc_t *refcp,
+ erts_aint_t unless_val,
+ erts_aint_t min_val)
+{
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
+ while (1) {
+ erts_aint_t exp, new;
+#ifdef ERTS_REFC_DEBUG
+ if (val < 0)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ if (val == unless_val)
+ return val;
+ new = val + 1;
+ exp = val;
+ val = erts_atomic_cmpxchg_nob((erts_atomic_t *) refcp, new, exp);
+ if (val == exp)
+ return new;
+ }
+}
+
+ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -946,20 +949,20 @@ ERTS_GLB_INLINE void
erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_dec_nob((erts_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -973,20 +976,20 @@ ERTS_GLB_INLINE void
erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_atomic_add_read_nob((erts_atomic_t *) refcp, diff);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
diff, val, min_val);
#else
- erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_atomic_add_nob((erts_atomic_t *) refcp, diff);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -996,22 +999,82 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
return val;
}
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_ENABLE_KERNEL_POLL
-extern int erts_use_kernel_poll;
-#endif
-#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
-#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
-#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)
-#define sys_memset(s,c,n) memset(s,c,n)
-#define sys_memzero(s, n) memset(s,'\0',n)
-#define sys_strcmp(s1,s2) strcmp(s1,s2)
-#define sys_strncmp(s1,s2,n) strncmp(s1,s2,n)
-#define sys_strcpy(s1,s2) strcpy(s1,s2)
-#define sys_strncpy(s1,s2,n) strncpy(s1,s2,n)
-#define sys_strlen(s) strlen(s)
+/* Thin wrappers around memcpy and friends, which should always be used in
+ * place of plain memcpy, memset, etc.
+ *
+ * Passing NULL to any of these functions is undefined behavior even though it
+ * may seemingly work when the length (if any) is zero; a compiler can take
+ * this as a hint that the passed operand may *never* be NULL and then optimize
+ * based on that information.
+ */
+ERTS_GLB_INLINE void *sys_memcpy(void *dest, const void *src, size_t n);
+ERTS_GLB_INLINE void *sys_memmove(void *dest, const void *src, size_t n);
+ERTS_GLB_INLINE int sys_memcmp(const void *s1, const void *s2, size_t n);
+ERTS_GLB_INLINE void *sys_memset(void *s, int c, size_t n);
+ERTS_GLB_INLINE void *sys_memzero(void *s, size_t n);
+ERTS_GLB_INLINE int sys_strcmp(const char *s1, const char *s2);
+ERTS_GLB_INLINE int sys_strncmp(const char *s1, const char *s2, size_t n);
+ERTS_GLB_INLINE char *sys_strcpy(char *dest, const char *src);
+ERTS_GLB_INLINE char *sys_strncpy(char *dest, const char *src, size_t n);
+ERTS_GLB_INLINE size_t sys_strlen(const char *s);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *sys_memcpy(void *dest, const void *src, size_t n)
+{
+ ASSERT(dest != NULL && src != NULL);
+ return memcpy(dest,src,n);
+}
+ERTS_GLB_INLINE void *sys_memmove(void *dest, const void *src, size_t n)
+{
+ ASSERT(dest != NULL && src != NULL);
+ return memmove(dest,src,n);
+}
+ERTS_GLB_INLINE int sys_memcmp(const void *s1, const void *s2, size_t n)
+{
+ ASSERT(s1 != NULL && s2 != NULL);
+ return memcmp(s1,s2,n);
+}
+ERTS_GLB_INLINE void *sys_memset(void *s, int c, size_t n)
+{
+ ASSERT(s != NULL);
+ return memset(s,c,n);
+}
+ERTS_GLB_INLINE void *sys_memzero(void *s, size_t n)
+{
+ ASSERT(s != NULL);
+ return memset(s,'\0',n);
+}
+ERTS_GLB_INLINE int sys_strcmp(const char *s1, const char *s2)
+{
+ ASSERT(s1 != NULL && s2 != NULL);
+ return strcmp(s1,s2);
+}
+ERTS_GLB_INLINE int sys_strncmp(const char *s1, const char *s2, size_t n)
+{
+ ASSERT(s1 != NULL && s2 != NULL);
+ return strncmp(s1,s2,n);
+}
+ERTS_GLB_INLINE char *sys_strcpy(char *dest, const char *src)
+{
+ ASSERT(dest != NULL && src != NULL);
+ return strcpy(dest,src);
+
+}
+ERTS_GLB_INLINE char *sys_strncpy(char *dest, const char *src, size_t n)
+{
+ ASSERT(dest != NULL && src != NULL);
+ return strncpy(dest,src,n);
+}
+ERTS_GLB_INLINE size_t sys_strlen(const char *s)
+{
+ ASSERT(s != NULL);
+ return strlen(s);
+}
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
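
The hazard described in the comment introducing these wrappers is concrete: a sketch, not from the source, of how an optimizing compiler may use a memcpy() call to delete a later NULL check, which is exactly the kind of mistake the ASSERTs catch in debug builds:

    #include <stdio.h>
    #include <string.h>

    /* With optimizations enabled, the compiler may assume 'dst' is
     * non-NULL after the memcpy() call (even when len == 0) and remove
     * the check below; both GCC and Clang have been observed to do so. */
    void copy_then_check(char *dst, const char *src, size_t len)
    {
        memcpy(dst, src, len);  /* UB if dst or src is NULL, even for len == 0 */
        if (dst == NULL)
            puts("unreachable after optimization");
    }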
/* define function symbols (needed in sys_drv_api) */
#define sys_fp_alloc sys_alloc
@@ -1107,6 +1170,14 @@ void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x)
#endif
+#ifndef MAX
+#define MAX(A, B) ((A) > (B) ? (A) : (B))
+#endif
+
+#ifndef MIN
+#define MIN(A, B) ((A) < (B) ? (A) : (B))
+#endif
+
#ifdef __WIN32__
#ifdef ARCH_64
#define ERTS_ALLOC_ALIGN_BYTES 16
@@ -1172,4 +1243,52 @@ int erts_get_printable_characters(void);
void erts_init_sys_common_misc(void);
+ERTS_GLB_INLINE Sint erts_raw_env_7bit_ascii_char_need(int encoding);
+ERTS_GLB_INLINE byte *erts_raw_env_7bit_ascii_char_put(byte c, byte *p,
+ int encoding);
+ERTS_GLB_INLINE int erts_raw_env_char_is_7bit_ascii_char(byte c, byte *p,
+ int encoding);
+ERTS_GLB_INLINE byte *erts_raw_env_next_char(byte *p, int encoding);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Sint
+erts_raw_env_7bit_ascii_char_need(int encoding)
+{
+ return (encoding == ERL_FILENAME_WIN_WCHAR) ? 2 : 1;
+}
+
+ERTS_GLB_INLINE byte *
+erts_raw_env_7bit_ascii_char_put(byte c,
+ byte *p,
+ int encoding)
+{
+ *(p++) = c;
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ *(p++) = 0;
+ return p;
+}
+
+ERTS_GLB_INLINE int
+erts_raw_env_char_is_7bit_ascii_char(byte c,
+ byte *p,
+ int encoding)
+{
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ return (p[0] == c) & (p[1] == 0);
+ else
+ return p[0] == c;
+}
+
+ERTS_GLB_INLINE byte *
+erts_raw_env_next_char(byte *p, int encoding)
+{
+ if (encoding == ERL_FILENAME_WIN_WCHAR)
+ return p + 2;
+ else
+ return p + 1;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif
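
The new erts_refc_inc_unless() above is a standard compare-and-swap retry loop: read the counter, refuse if it equals unless_val, otherwise attempt to publish val + 1 and retry if another thread got in between. The same shape in portable C11 atomics; a sketch of the technique, not the ERTS atomics API, with relaxed ordering chosen to mirror the _nob operations:

    #include <stdatomic.h>

    /* Increment *refc unless it currently equals 'unless_val'. Returns
     * the new count on success, or 'unless_val' if the increment was
     * refused. */
    static long refc_inc_unless(atomic_long *refc, long unless_val)
    {
        long val = atomic_load_explicit(refc, memory_order_relaxed);
        for (;;) {
            if (val == unless_val)
                return val;                 /* refuse the increment */
            /* On failure, 'val' is refreshed with the current count and
             * the loop re-checks it against 'unless_val'. */
            if (atomic_compare_exchange_weak_explicit(
                    refc, &val, val + 1,
                    memory_order_relaxed, memory_order_relaxed))
                return val + 1;
        }
    }

The usual instantiation is unless_val == 0, so that a shared object whose count has already dropped to zero is never revived by a late reader.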
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 6f15082130..a3069e419a 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,57 +17,157 @@
*
* %CopyrightEnd%
*/
-
+
/*
- * TIMING WHEEL
+ * TIMER WHEEL
+ *
+ *
+ * The time scale used for timers is Erlang monotonic time. The
+ * time unit used is ERTS-specific clock ticks. A clock tick is
+ * currently defined as 1 millisecond. That is, the resolution of
+ * timers triggered by the runtime system is 1 millisecond.
*
- * Timeouts kept in an wheel. A timeout is measured relative to the
- * current slot (tiw_pos) in the wheel, and inserted at slot
- * (tiw_pos + timeout) % TIW_SIZE. Each timeout also has a count
- * equal to timeout/TIW_SIZE, which is needed since the time axis
- * is wrapped arount the wheel.
+ * When a timer is set, it is determined at what Erlang monotonic
+ * time, in clock ticks, it should be triggered.
*
- * Several slots may be processed in one operation. If the number of
- * slots is greater that the wheel size, the wheel is only traversed
- * once,
+ * The 'pos' field of the wheel corresponds to the current time of
+ * the wheel. That is, it corresponds to Erlang monotonic time in
+ * the clock tick time unit. The 'pos' field of the wheel is
+ * monotonically increased when erts_bump_timers() is called. All
+ * timers in the wheel that have a time less than or equal to
+ * 'pos' are triggered by the bump operation. The bump operation
+ * may however be spread over multiple calls to erts_bump_timers()
+ * if there are a lot of timers to trigger.
*
- * The following example shows a time axis where there is one timeout
- * at each "tick", and where 1, 2, 3 ... wheel slots are released in
- * one operation. The notation "<x" means "release all items with
- * counts less than x".
+ * Each scheduler thread maintains its own timer wheel. The timer
+ * wheel of a scheduler, however, actually consists of two wheels.
+ * A soon wheel and a later wheel.
+ *
+ *
+ * -- The Soon Wheel --
+ *
+ * The soon wheel contains timers that should be triggered soon.
+ * That is, they are soon to be triggered. Each slot in the soon
+ * wheel is 1 clock tick wide. The number of slots in the soon
+ * wheel is currently 2¹⁴. That is, it contains timers in the
+ * range ('pos', 'pos' + 2¹⁴] which corresponds to a bit more
+ * than 16 seconds.
+ *
+ * When the bump operation is started, 'pos' is moved forward to a
+ * position that corresponds to the current Erlang monotonic time. Then
+ * all timers that are in the range (old 'pos', new 'pos'] are
+ * triggered. During a bump operation, the soon wheel may contain
+ * timers in the two, possibly overlapping, ranges (old 'pos',
+ * old 'pos' + 2¹⁴], and (new 'pos', new 'pos' + 2¹⁴]. This may
+ * occur even if the bump operation doesn't yield, due to timeout
+ * callbacks inserting new timers.
+ *
+ *
+ * -- The Later Wheel --
+ *
+ * The later wheel contains timers that are further away from 'pos'
+ * than the width of the soon timer wheel. That is, currently
+ * timers further away from 'pos' than 2¹⁴ clock ticks. The width
+ * of each slot in the later wheel is half the width of the soon
+ * wheel. That is, each slot is currently 2¹³ clock ticks wide
+ * which corresponds to about 8 seconds. If three timers of the
+ * times 'pos' + 17000, 'pos' + 18000, and 'pos' + 19000 are
+ * inserted, they will all end up in the same slot in the later
+ * wheel.
+ *
+ * The number of slots in the later wheel is currently the same as
+ * in the soon wheel, i.e. 2¹⁴. That is, one revolution of the later
+ * wheel currently corresponds to 2¹⁴×2¹³ clock ticks which is
+ * almost 37 ½ hours. Timers even further away than that are put in
+ * the later slot identified by their time modulo the size of the later
+ * wheel. Such timers are however very uncommon. Most timers used
+ * by the runtime system will utilize the high level timer API.
+ * The high level timer implementation will not insert timers
+ * further away than one revolution into the later wheel. It will
+ * instead keep such timers in a tree of very long timers. The
+ * high level timer implementation utilizes one timer wheel timer
+ * for the management of this tree of timers. This timer is set to
+ * the closest timeout in the tree. This timer may however be
+ * further away than one revolution in the later wheel.
+ *
+ * The 'later.pos' field identifies the next position in the later wheel.
+ * 'later.pos' is always increased by the width of a later wheel slot.
+ * That is, currently 2¹³ clock ticks. When 'pos' is moved (during
+ * a bump operation) closer to 'later.pos' than the width of a later
+ * wheel slot, i.e. currently when 'pos' + 2¹³ ≥ 'later.pos', we
+ * inspect the slot identified by 'later.pos' and then move 'later.pos'
+ * forward. When inspecting the later slot we move all timers in the
+ * slot, that are in the soon wheel range, from the later wheel to
+ * the soon wheel. Timers one or more revolutions of the later wheel
+ * away are kept in the slot.
+ *
+ * During normal operation, timers originally located in the later
+ * wheel will currently be moved into the soon wheel about 8 to
+ * 16 seconds before they should be triggered. During extremely
+ * heavy load, the scheduler might however be heavily delayed, so
+ * the code must be prepared for situations where the time for
+ * triggering the timer has passed when we inspect the later wheel
+ * slot, and then trigger the timer immediately. We must also be
+ * prepared to inspect multiple later wheel slots at once due to the
+ * delay.
+ *
+ *
+ * -- Slot Management --
+ *
+ * All timers of a slot are placed in a circular doubly linked
+ * list. This makes insertion and removal of a timer O(1).
+ *
+ * While bumping timers in a slot, we move the circular list
+ * away from the slot, and refer to it from the 'sentinel'
+ * field. The list will stay there until we are done with it
+ * even if the bump operation should yield. The cancel operation
+ * can remove the timer from this position as well as from the
+ * slot position by just removing it from the circular doubly
+ * linked list that it is in.
+ *
+ * -- At Once Slot --
+ *
+ * If a timer is set that has a time earlier or equal to 'pos',
+ * it is not inserted into the wheel. It is instead inserted
+ * into a circular doubly linked list referred to by the "at
+ * once" slot. When the bump operation is performed these timers
+ * will be triggered at once. The circular list of the slot will
+ * be moved to the 'sentinel' field while bumping these timers
+ * as when bumping an ordinary wheel slot. A yielding bump
+ * operation and cancellation of timers are handled the same way
+ * as if the timer were in a wheel slot.
+ *
+ * -- Searching for Next Timeout --
+ *
+ * In order to limit the amount of work needed to find the
+ * next timeout, we keep track of the total number of timers in the
+ * wheels, the total number of timers in the later wheel, the total
+ * number of timers in the soon wheel, and the total number of
+ * timers in each range of slots. Each slot range currently
+ * contains 512 slots.
+ *
+ * When the next timeout is less than the soon wheel width away, we
+ * determine the exact timeout. Due to the timer counts of
+ * slot ranges, we currently need to search at most 1024 slots
+ * in the soon wheel, besides inspecting the slot range counts
+ * and two slots in the later wheel, which potentially might trigger
+ * timeouts for moving timers from the later wheel to the soon wheel
+ * earlier than the timeouts in the soon wheel. We also keep track
+ * of the latest known minimum timeout position in each wheel, which
+ * makes it possible to avoid scanning from the current position
+ * each time.
+ *
+ * When the next timeout is further away than the soon wheel width,
+ * we settle for the earliest possible timeout in the first
+ * non-empty slot range. The further away the next timeout is, the
+ * more likely it is that the next timeout changes before we
+ * actually get there; that is, another timer is set to an earlier
+ * time and/or the timer is cancelled. There is therefore no point
+ * in determining the next timeout exactly in this case. If the
+ * state does not change, we will wake up a bit early, recalculate
+ * the next timeout, and eventually be so close to it that we
+ * determine it exactly.
*
- * Size of wheel: 4
- *
- * --|----|----|----|----|----|----|----|----|----|----|----|----|----
- * 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0 2.1 2.2 2.3 3.0
- *
- * 1 [ )
- * <1 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0 2.1 2.2 2.3 2.0
- *
- * 2 [ )
- * <1 <1 0.2 0.3 0.0 0.1 1.2 1.3 1.0 1.1 2.2 2.3 2.0
- *
- * 3 [ )
- * <1 <1 <1 0.3 0.0 0.1 0.2 1.3 1.0 1.1 1.2 2.3 2.0
- *
- * 4 [ )
- * <1 <1 <1 <1 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0
- *
- * 5 [ )
- * <2 <1 <1 <1. 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0
- *
- * 6 [ )
- * <2 <2 <1 <1. 0.2 0.3 0.0 0.1 1.2 1.3 1.0
- *
- * 7 [ )
- * <2 <2 <2 <1. 0.3 0.0 0.1 0.2 1.3 1.0
- *
- * 8 [ )
- * <2 <2 <2 <2. 0.0 0.1 0.2 0.3 1.0
- *
- * 9 [ )
- * <3 <2 <2 <2. 0.1 0.2 0.3 0.0
- *
*/
#ifdef HAVE_CONFIG_H
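
Given the geometry described above (one-tick soon slots, later slots half a soon-wheel wide, 2¹⁴ slots in each wheel), picking the slot for a timer is shift-and-mask arithmetic. A hedged sketch with the constants inlined; the real wheel keeps both slot arrays plus the at-once slot in one vector and maintains the range counts, so pick_slot and the inlined sizes here are purely illustrative:

    #include <stdint.h>

    #define SOON_BITS   14
    #define SOON_SIZE   (1 << SOON_BITS)        /* 2^14 one-tick slots */
    #define SOON_MASK   (SOON_SIZE - 1)
    #define LATER_SHIFT (SOON_BITS - 1)         /* later slot = 2^13 ticks */
    #define LATER_SIZE  (1 << SOON_BITS)        /* same slot count as soon */
    #define LATER_MASK  (LATER_SIZE - 1)

    /* Pick the slot for a timer expiring at 'tpos' (clock ticks), given
     * the wheel's current position 'pos'. A negative result stands for
     * the "at once" slot. */
    static int pick_slot(int64_t pos, int64_t tpos)
    {
        if (tpos <= pos)
            return -1;                          /* trigger at next bump */
        if (tpos < pos + SOON_SIZE)
            return (int)(tpos & SOON_MASK);     /* soon wheel */
        /* Later wheel: the index wraps every LATER_SIZE * 2^13 ticks,
         * i.e. roughly every 37.5 hours at 1 ms per tick. */
        return SOON_SIZE + (int)((tpos >> LATER_SHIFT) & LATER_MASK);
    }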
@@ -80,8 +180,11 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
-#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
-#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
+#define ERTS_MAX_CLKTCKS \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_TIME_MAX)
+
+#define ERTS_CLKTCKS_WEEK \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_SEC_TO_MONOTONIC(7*60*60*24))
#ifdef ERTS_ENABLE_LOCK_CHECK
#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
@@ -90,6 +193,10 @@
#endif
#if 0
+# define ERTS_TW_HARD_DEBUG
+#endif
+
+#if defined(ERTS_TW_HARD_DEBUG) && !defined(ERTS_TW_DEBUG)
# define ERTS_TW_DEBUG
#endif
#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
@@ -97,16 +204,62 @@
#endif
#undef ERTS_TW_ASSERT
-#if defined(ERTS_TW_DEBUG)
+#if defined(ERTS_TW_DEBUG)
# define ERTS_TW_ASSERT(E) ERTS_ASSERT(E)
#else
# define ERTS_TW_ASSERT(E) ((void) 1)
#endif
#ifdef ERTS_TW_DEBUG
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 5
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 500
#else
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 100
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 10000
+#endif
+#define ERTS_TW_COST_SLOT 1
+#define ERTS_TW_COST_SLOT_MOVE 5
+#define ERTS_TW_COST_TIMEOUT 100
+
+/*
+ * Every slot in the soon wheel is a clock tick (as defined
+ * by ERTS) wide. A clock tick is currently 1 millisecond.
+ */
+
+#define ERTS_TW_SOON_WHEEL_FIRST_SLOT 0
+#define ERTS_TW_SOON_WHEEL_END_SLOT \
+ (ERTS_TW_SOON_WHEEL_FIRST_SLOT + ERTS_TW_SOON_WHEEL_SIZE)
+
+#define ERTS_TW_SOON_WHEEL_MASK (ERTS_TW_SOON_WHEEL_SIZE-1)
+
+/*
+ * Every slot in the later wheel is as wide as half the soon
+ * wheel, i.e. ERTS_TW_SOON_WHEEL_SIZE/2 clock ticks.
+ */
+
+#define ERTS_TW_LATER_WHEEL_SHIFT (ERTS_TW_SOON_WHEEL_BITS - 1)
+#define ERTS_TW_LATER_WHEEL_SLOT_SIZE \
+ ((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT))
+#define ERTS_TW_LATER_WHEEL_POS_MASK \
+ (~((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT)-1))
+
+#define ERTS_TW_LATER_WHEEL_FIRST_SLOT ERTS_TW_SOON_WHEEL_SIZE
+#define ERTS_TW_LATER_WHEEL_END_SLOT \
+ (ERTS_TW_LATER_WHEEL_FIRST_SLOT + ERTS_TW_LATER_WHEEL_SIZE)
+
+#define ERTS_TW_LATER_WHEEL_MASK (ERTS_TW_LATER_WHEEL_SIZE-1)
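+
+/*
+ * Rough widths, assuming the non-debug wheel sizes from erl_time.h
+ * (2^14 slots per wheel) and a 1 millisecond clock tick: the soon
+ * wheel then covers about 16 seconds at one tick resolution, while
+ * each later wheel slot covers about 8 seconds, giving the later
+ * wheel a total range of roughly 37 hours.
+ */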
+
+#define ERTS_TW_SCNT_BITS 9
+#define ERTS_TW_SCNT_SIZE \
+ ((ERTS_TW_SOON_WHEEL_SIZE + ERTS_TW_LATER_WHEEL_SIZE) \
+ >> ERTS_TW_SCNT_BITS)
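+
+/*
+ * Example of the range counter indexing (a sketch; see scnt_get_ix()
+ * below): with ERTS_TW_SCNT_BITS = 9, every counter covers a range
+ * of 512 consecutive slots, so the counter of a slot is
+ *
+ *     tiw->scnt[slot >> ERTS_TW_SCNT_BITS];
+ *
+ * A zero counter lets a search skip the whole 512 slot range at once.
+ */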
+
+#ifdef __GNUC__
+#if ERTS_TW_SOON_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger soon timer wheel
+#endif
+#if ERTS_TW_LATER_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger later timer wheel
+#endif
#endif
/* Actual interval time chosen by sys_init_time() */
@@ -119,95 +272,360 @@ static int tiw_itime; /* Constant after init */
# define TIW_ITIME tiw_itime
#endif
+const int etp_tw_soon_wheel_size = ERTS_TW_SOON_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_soon_wheel_mask = ERTS_TW_SOON_WHEEL_MASK;
+const int etp_tw_soon_wheel_first_slot = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+
+const int etp_tw_later_wheel_size = ERTS_TW_LATER_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_later_wheel_slot_size = ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+const int etp_tw_later_wheel_shift = ERTS_TW_LATER_WHEEL_SHIFT;
+const ErtsMonotonicTime etp_tw_later_wheel_mask = ERTS_TW_LATER_WHEEL_MASK;
+const ErtsMonotonicTime etp_tw_later_wheel_pos_mask = ERTS_TW_LATER_WHEEL_POS_MASK;
+const int etp_tw_later_wheel_first_slot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
struct ErtsTimerWheel_ {
- ErtsTWheelTimer *w[ERTS_TIW_SIZE];
+ ErtsTWheelTimer *slots[1 /* At Once Slot */
+ + ERTS_TW_SOON_WHEEL_SIZE /* Soon Wheel Slots */
+ + ERTS_TW_LATER_WHEEL_SIZE]; /* Later Wheel Slots */
+ ErtsTWheelTimer **w;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ Sint bump_scnt[ERTS_TW_SCNT_SIZE];
ErtsMonotonicTime pos;
Uint nto;
struct {
- ErtsTWheelTimer *head;
- ErtsTWheelTimer *tail;
Uint nto;
} at_once;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ Uint nto;
+ } soon;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ int min_tpos_slot;
+ ErtsMonotonicTime pos;
+ Uint nto;
+ } later;
int yield_slot;
int yield_slots_left;
- int yield_start_pos;
ErtsTWheelTimer sentinel;
int true_next_timeout_time;
+ ErtsMonotonicTime next_timeout_pos;
ErtsMonotonicTime next_timeout_time;
};
-static ERTS_INLINE ErtsMonotonicTime
-find_next_timeout(ErtsSchedulerData *esdp,
- ErtsTimerWheel *tiw,
- int search_all,
- ErtsMonotonicTime curr_time, /* When !search_all */
- ErtsMonotonicTime max_search_time) /* When !search_all */
+#define ERTS_TW_SLOT_AT_ONCE (-1)
+
+#define ERTS_TW_BUMP_LATER_WHEEL(TIW) \
+ ((TIW)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
+
+static int bump_later_wheel(ErtsTimerWheel *tiw, int *yield_count_p);
+
+#ifdef ERTS_TW_DEBUG
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_soon_slots((TIW), (TO_POS))
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_later_slots((TIW), (TO_POS))
+void dbg_verify_empty_soon_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+void dbg_verify_empty_later_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+#else
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS)
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS)
+#endif
+
+static ERTS_INLINE int
+scnt_get_ix(int slot)
{
- int start_ix, tiw_pos_ix;
- ErtsTWheelTimer *p;
+ return slot >> ERTS_TW_SCNT_BITS;
+}
+
+static ERTS_INLINE void
+scnt_inc(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]++;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static ERTS_INLINE void
+scnt_ix_inc(Sint *scnt, int six)
+{
+ scnt[six]++;
+}
+
+#endif
+
+static ERTS_INLINE void
+scnt_dec(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]--;
+ ERTS_TW_ASSERT(scnt[slot >> ERTS_TW_SCNT_BITS] >= 0);
+}
+
+static ERTS_INLINE void
+scnt_ix_dec(Sint *scnt, int six)
+{
+ scnt[six]--;
+ ERTS_TW_ASSERT(scnt[six] >= 0);
+}
+
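+/*
+ * Advance *slotp to the next slot (wrapping from end_slot to
+ * first_slot) and then skip ahead over slot ranges whose counters
+ * in 'scnt' are zero, moving at most *leftp slots in total. When
+ * given, *posp is advanced by slot_sz per slot moved and *sixp is
+ * set to the resulting range index.
+ */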
+static ERTS_INLINE void
+scnt_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt, int first_slot,
+ int end_slot, ErtsMonotonicTime slot_sz)
+{
+ int slot = *slotp;
+ int left = *leftp;
+ int ix;
+
+ ERTS_TW_ASSERT(*leftp >= 0);
+
+ left--;
+ slot++;
+ if (slot == end_slot)
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+
+ while (!scnt[ix] && left > 0) {
+ int diff, old_slot = slot;
+ ix++;
+ slot = (ix << ERTS_TW_SCNT_BITS);
+ diff = slot - old_slot;
+ if (left < diff) {
+ slot = old_slot + left;
+ diff = left;
+ }
+ if (slot < end_slot)
+ left -= diff;
+ else {
+ left -= end_slot - old_slot;
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+ }
+ }
+
+ ERTS_TW_ASSERT(left >= -1);
+
+ if (posp)
+ *posp += slot_sz * ((ErtsMonotonicTime) (*leftp - left));
+ if (sixp)
+ *sixp = slot >> ERTS_TW_SCNT_BITS;
+ *leftp = left;
+ *slotp = slot;
+}
+
+
+static ERTS_INLINE void
+scnt_soon_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_SOON_WHEEL_FIRST_SLOT,
+ ERTS_TW_SOON_WHEEL_END_SLOT, 1);
+}
+
+static ERTS_INLINE void
+scnt_later_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_LATER_WHEEL_FIRST_SLOT,
+ ERTS_TW_LATER_WHEEL_END_SLOT,
+ ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+}
+
+
+static ERTS_INLINE int
+soon_slot(ErtsMonotonicTime soon_pos)
+{
+ ErtsMonotonicTime slot = soon_pos;
+ slot &= ERTS_TW_SOON_WHEEL_MASK;
+
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+static ERTS_INLINE int
+later_slot(ErtsMonotonicTime later_pos)
+{
+ ErtsMonotonicTime slot = later_pos;
+ slot >>= ERTS_TW_LATER_WHEEL_SHIFT;
+ slot &= ERTS_TW_LATER_WHEEL_MASK;
+ slot += ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
+ ERTS_TW_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
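+/*
+ * A worked example of the two mappings above, with illustrative
+ * sizes only (8 soon slots and 4 later slots, so that
+ * ERTS_TW_LATER_WHEEL_SHIFT would be 2): position 13 maps to soon
+ * slot 13 & 7 = 5, while as a later wheel position it maps to slot
+ * 8 + ((13 >> 2) & 3) = 11 in the combined slot array.
+ */
+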
+#ifdef ERTS_TW_HARD_DEBUG
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS) \
+ hrd_dbg_check_wheels((TIW), (CHK_MIN_TPOS))
+static void hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos);
+#else
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS)
+#endif
+
+static ErtsMonotonicTime
+find_next_timeout(ErtsSchedulerData *esdp, ErtsTimerWheel *tiw)
+{
+ int slot, slots;
int true_min_timeout = 0;
- ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos;
+ ErtsMonotonicTime min_timeout_pos;
+
+ ERTS_TW_ASSERT(tiw->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE < tiw->later.pos
+ && tiw->later.pos <= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ ERTS_TW_ASSERT(tiw->yield_slot == ERTS_TW_SLOT_INACTIVE);
if (tiw->nto == 0) { /* no timeouts in wheel */
- if (!search_all)
- min_timeout_pos = tiw->pos;
- else {
- curr_time = erts_get_monotonic_time(esdp);
- tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
- }
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- goto found_next;
+ ErtsMonotonicTime curr_time = erts_get_monotonic_time(esdp);
+ tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->later.pos = min_timeout_pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ min_timeout_pos += ERTS_CLKTCKS_WEEK;
+ goto done;
}
- slot_timeout_pos = min_timeout_pos = tiw->pos;
- if (search_all)
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- else
- min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
+ ERTS_TW_ASSERT(tiw->soon.nto || tiw->later.nto);
- start_ix = tiw_pos_ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
+ if (!tiw->soon.nto) {
+ ErtsMonotonicTime tpos, min_tpos;
- do {
- if (++slot_timeout_pos >= min_timeout_pos)
- break;
-
- p = tiw->w[tiw_pos_ix];
-
- if (p) {
- ErtsTWheelTimer *end = p;
-
- do {
- ErtsMonotonicTime timeout_pos;
- timeout_pos = p->timeout_pos;
- if (min_timeout_pos > timeout_pos) {
- true_min_timeout = 1;
- min_timeout_pos = timeout_pos;
- if (min_timeout_pos <= slot_timeout_pos)
- goto found_next;
- }
- p = p->next;
- } while (p != end);
- }
+ /* Search later wheel... */
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ if (min_tpos <= tiw->later.pos) {
+ tpos = tiw->later.pos;
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = min_tpos - tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp >= ERTS_TW_LATER_WHEEL_SIZE) {
+ /* Timeout more than one revolution ahead... */
+
+ /* Pre-timeout for move from later to soon wheel... */
+ min_timeout_pos = min_tpos - ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ goto done;
+ }
+ tpos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, min_tpos);
+ slots = ERTS_TW_LATER_WHEEL_SIZE - ((int) tmp);
+ }
+
+ slot = later_slot(tpos);
+
+ /*
+ * We never search for an exact timeout in the
+ * later wheel, but instead settle for the first
+ * non-empty scnt range.
+ */
+ if (tiw->w[slot])
+ true_min_timeout = 1;
+ else
+ scnt_later_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ ERTS_TW_ASSERT(slot == later_slot(tpos));
+
+ /* Pre-timeout for move from later to soon wheel... */
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ min_timeout_pos = tpos;
+ }
+ else {
+ ErtsMonotonicTime tpos;
+ /* Search soon wheel... */
+
+ min_timeout_pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+
+ /*
+ * Besides inspecting the soon wheel we
+ * may also have to inspect two slots in the
+ * later wheel which can potentially trigger
+ * timeouts before the timeouts in the soon wheel...
+ */
+ if (tiw->later.min_tpos > (tiw->later.pos
+ + 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE)) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(
+ tiw, 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ }
+ else {
+ int fslot;
+ tpos = tiw->later.pos;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ fslot = later_slot(tiw->later.pos);
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ else {
+ tpos += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos < min_timeout_pos) {
+ fslot++;
+ if (fslot == ERTS_TW_LATER_WHEEL_END_SLOT)
+ fslot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ }
+ }
+ }
+
+ if (tiw->soon.min_tpos <= tiw->pos) {
+ tpos = tiw->pos;
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = tiw->soon.min_tpos - tiw->pos;
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_SIZE > tmp);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, tiw->soon.min_tpos);
+ slots = ERTS_TW_SOON_WHEEL_SIZE - ((int) tmp);
+ tpos = tiw->soon.min_tpos;
+ }
+
+ slot = soon_slot(tpos);
+
+ /* find next non-empty slot */
+ while (tpos < min_timeout_pos) {
+ if (tiw->w[slot]) {
+ ERTS_TW_ASSERT(tiw->w[slot]->timeout_pos == tpos);
+ min_timeout_pos = tpos;
+ break;
+ }
+ scnt_soon_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+ }
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- } while (start_ix != tiw_pos_ix);
+ tiw->soon.min_tpos = min_timeout_pos;
+ true_min_timeout = 1;
+ }
+
+done: {
+ ErtsMonotonicTime min_timeout;
-found_next:
+ min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
+ tiw->next_timeout_pos = min_timeout_pos;
+ tiw->next_timeout_time = min_timeout;
+ tiw->true_next_timeout_time = true_min_timeout;
- min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
- tiw->next_timeout_time = min_timeout;
- tiw->true_next_timeout_time = true_min_timeout;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 1);
- return min_timeout;
+ return min_timeout;
+ }
}
static ERTS_INLINE void
insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
{
- ERTS_TW_ASSERT(slot >= 0);
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
p->slot = slot;
if (!tiw->w[slot]) {
tiw->w[slot] = p;
@@ -223,55 +641,89 @@ insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
prev->next = p;
next->prev = p;
}
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ tiw->at_once.nto++;
+ else {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->soon.nto++;
+ if (tiw->soon.min_tpos > tpos)
+ tiw->soon.min_tpos = tpos;
+ }
+ else {
+ ERTS_TW_ASSERT(p->timeout_pos >= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->later.nto++;
+ if (tiw->later.min_tpos > tpos) {
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ }
+ }
+ scnt_inc(tiw->scnt, slot);
+ }
}
static ERTS_INLINE void
remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
int slot = p->slot;
- ERTS_TW_ASSERT(slot != ERTS_TWHEEL_SLOT_INACTIVE);
-
- if (slot >= 0) {
- /*
- * Timer in wheel or in circular
- * list of timers currently beeing
- * triggered (referred by sentinel).
- */
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
-
- if (p->next == p) {
- ERTS_TW_ASSERT(tiw->w[slot] == p);
- tiw->w[slot] = NULL;
- }
- else {
- if (tiw->w[slot] == p)
- tiw->w[slot] = p->next;
- p->prev->next = p->next;
- p->next->prev = p->prev;
- }
+ int empty_slot;
+ ERTS_TW_ASSERT(slot != ERTS_TW_SLOT_INACTIVE);
+
+ /*
+ * The timer is in a circular list referred to either
+ * by the at once slot, a slot in the soon wheel, a
+ * slot in the later wheel, or by the sentinel (timers
+ * currently being triggered).
+ */
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ if (p->next == p) {
+ /* Cannot be referred to by the sentinel, i.e. must be referred to by a slot... */
+ ERTS_TW_ASSERT(tiw->w[slot] == p);
+ tiw->w[slot] = NULL;
+ empty_slot = 1;
}
else {
- /* Timer in "at once" queue... */
- ERTS_TW_ASSERT(slot == ERTS_TWHEEL_SLOT_AT_ONCE);
- if (p->prev)
- p->prev->next = p->next;
- else {
- ERTS_TW_ASSERT(tiw->at_once.head == p);
- tiw->at_once.head = p->next;
- }
- if (p->next)
- p->next->prev = p->prev;
- else {
- ERTS_TW_ASSERT(tiw->at_once.tail == p);
- tiw->at_once.tail = p->prev;
- }
+ if (tiw->w[slot] == p)
+ tiw->w[slot] = p->next;
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
+ empty_slot = 0;
+ }
+ if (slot == ERTS_TW_SLOT_AT_ONCE) {
ERTS_TW_ASSERT(tiw->at_once.nto > 0);
tiw->at_once.nto--;
}
-
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
-
- tiw->nto--;
+ else {
+ scnt_dec(tiw->scnt, slot);
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && p->timeout_pos == tiw->next_timeout_pos) {
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ }
+ else {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && tiw->later.min_tpos_slot == slot) {
+ ErtsMonotonicTime tpos = tiw->later.min_tpos;
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos == tiw->next_timeout_pos)
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ }
+ }
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ErtsMonotonicTime
@@ -280,58 +732,26 @@ erts_check_next_timeout_time(ErtsSchedulerData *esdp)
ErtsTimerWheel *tiw = esdp->timer_wheel;
ErtsMonotonicTime time;
ERTS_MSACC_DECLARE_CACHE_X();
+ ERTS_TW_ASSERT(tiw->next_timeout_time
+ == ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos));
if (tiw->true_next_timeout_time)
- return tiw->next_timeout_time;
+ return tiw->next_timeout_time; /* known timeout... */
+ if (tiw->next_timeout_pos > tiw->pos + ERTS_TW_SOON_WHEEL_SIZE)
+ return tiw->next_timeout_time; /* sufficiently far away... */
ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(ERTS_MSACC_STATE_TIMERS);
- time = find_next_timeout(esdp, tiw, 1, 0, 0);
+ time = find_next_timeout(esdp, tiw);
ERTS_MSACC_POP_STATE_M_X();
return time;
}
-#ifndef ERTS_TW_DEBUG
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) ((void) 0)
-#else
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) debug_check_safe_to_skip_to((TIW), (TO))
-static void
-debug_check_safe_to_skip_to(ErtsTimerWheel *tiw, ErtsMonotonicTime skip_to_pos)
-{
- int slots, ix;
- ErtsTWheelTimer *tmr;
- ErtsMonotonicTime tmp;
-
- ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
- tmp = skip_to_pos - tiw->pos;
- ERTS_TW_ASSERT(tmp >= 0);
- if (tmp < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp;
- else
- slots = ERTS_TIW_SIZE;
-
- while (slots > 0) {
- tmr = tiw->w[ix];
- if (tmr) {
- ErtsTWheelTimer *end = tmr;
- do {
- ERTS_TW_ASSERT(tmr->timeout_pos > skip_to_pos);
- tmr = tmr->next;
- } while (tmr != end);
- }
- ix++;
- if (ix == ERTS_TIW_SIZE)
- ix = 0;
- slots--;
- }
-}
-#endif
-
static ERTS_INLINE void
timeout_timer(ErtsTWheelTimer *p)
{
ErlTimeoutProc timeout;
void *arg;
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
- timeout = p->u.func.timeout;
- arg = p->u.func.arg;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
+ timeout = p->timeout;
+ arg = p->arg;
(*timeout)(arg);
ASSERT_NO_LOCKED_LOCKS;
}
@@ -339,73 +759,108 @@ timeout_timer(ErtsTWheelTimer *p)
void
erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- int tiw_pos_ix, slots, yielded_slot_restarted, yield_count;
- ErtsMonotonicTime bump_to, tmp_slots, old_pos;
+ int slot, restarted, yield_count, slots, scnt_ix;
+ ErtsMonotonicTime bump_to;
+ Sint *scnt, *bump_scnt;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
/*
* In order to be fair we always continue with work
* where we left off when restarting after a yield.
*/
- if (tiw->yield_slot >= 0) {
- yielded_slot_restarted = 1;
- tiw_pos_ix = tiw->yield_slot;
- slots = tiw->yield_slots_left;
+ slot = tiw->yield_slot;
+ restarted = slot != ERTS_TW_SLOT_INACTIVE;
+ if (restarted) {
bump_to = tiw->pos;
- old_pos = tiw->yield_start_pos;
- goto restart_yielded_slot;
+ if (slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT)
+ goto restart_yielded_later_slot;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ goto restart_yielded_at_once_slot;
+ scnt_ix = scnt_get_ix(slot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_SOON_WHEEL_SIZE);
+ goto restart_yielded_soon_slot;
}
do {
- yielded_slot_restarted = 0;
-
+ restarted = 0;
bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
while (1) {
ErtsTWheelTimer *p;
- old_pos = tiw->pos;
-
if (tiw->nto == 0) {
empty_wheel:
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, bump_to);
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = bump_to + ERTS_CLKTCKS_WEEK;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->pos = bump_to;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->later.pos = bump_to + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
ERTS_MSACC_POP_STATE_M_X();
return;
}
- p = tiw->at_once.head;
- while (p) {
- if (--yield_count <= 0) {
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- ERTS_MSACC_POP_STATE_M_X();
- return;
- }
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+
+ if (p) {
+
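+ /*
+ * Move the circular list of the at once slot over to the
+ * sentinel while its timers are triggered; yielding and
+ * cancellation of timers then work just as for an ordinary
+ * wheel slot (see the description at the top of this file).
+ */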
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[ERTS_TW_SLOT_AT_ONCE] = NULL;
+
+ while (1) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->nto--;
+ tiw->at_once.nto--;
+
+ timeout_timer(p);
+
+ yield_count -= ERTS_TW_COST_TIMEOUT;
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->nto--;
- tiw->at_once.nto--;
- tiw->at_once.head = p->next;
- if (p->next)
- p->next->prev = NULL;
- else
- tiw->at_once.tail = NULL;
+ restart_yielded_at_once_slot:
- timeout_timer(p);
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (yield_count <= 0) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->yield_slot = ERTS_TW_SLOT_AT_ONCE;
+ ERTS_MSACC_POP_STATE_M_X();
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
- p = tiw->at_once.head;
}
if (tiw->pos >= bump_to) {
@@ -416,39 +871,66 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
if (tiw->nto == 0)
goto empty_wheel;
- if (tiw->true_next_timeout_time) {
- ErtsMonotonicTime skip_until_pos;
+ /*
+ * Save the slot counts in a bump operation local
+ * array.
+ *
+ * The number of timers to trigger (or move)
+ * will only decrease from now until we have
+ * completed this bump operation (even if we
+ * yield in the middle of it).
+ *
+ * The number of timers in the wheels may,
+ * however, increase due to timers being set
+ * by timeout callbacks.
+ */
+ sys_memcpy((void *) bump_scnt, (void *) scnt,
+ sizeof(Sint) * ERTS_TW_SCNT_SIZE);
+
+ if (tiw->soon.min_tpos > tiw->pos) {
+ ErtsMonotonicTime skip_until_pos = tiw->soon.min_tpos;
+
/*
* No need inspecting slots where we know no timeouts
* to trigger should reside.
*/
- skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
if (skip_until_pos > bump_to)
skip_until_pos = bump_to;
skip_until_pos--;
if (skip_until_pos > tiw->pos) {
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
-
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, skip_until_pos);
tiw->pos = skip_until_pos;
}
}
- tiw_pos_ix = (int) ((tiw->pos+1) & (ERTS_TIW_SIZE-1));
- tmp_slots = (bump_to - tiw->pos);
- if (tmp_slots < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp_slots;
- else
- slots = ERTS_TIW_SIZE;
+ {
+ ErtsMonotonicTime tmp_slots = bump_to - tiw->pos;
+ if (tmp_slots < ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ slot = soon_slot(tiw->pos+1);
tiw->pos = bump_to;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
+
+ scnt_ix = scnt_get_ix(slot);
+
+ /* Timeout timers in soon wheel */
while (slots > 0) {
- p = tiw->w[tiw_pos_ix];
+ yield_count -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[slot];
if (p) {
+ /* timeout callbacks need tiw->pos to be up to date */
if (p->next == p) {
ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
@@ -459,22 +941,28 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->sentinel.next->prev = &tiw->sentinel;
tiw->sentinel.prev->next = &tiw->sentinel;
}
- tiw->w[tiw_pos_ix] = NULL;
+ tiw->w[slot] = NULL;
while (1) {
- if (p->timeout_pos > bump_to) {
- /* Very unusual case... */
- ++yield_count;
- insert_timer_into_slot(tiw, tiw_pos_ix, p);
- }
- else {
- /* Normal case... */
- timeout_timer(p);
- tiw->nto--;
- }
-
- restart_yielded_slot:
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= p->slot
+ && p->slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ scnt_ix_dec(scnt, scnt_ix);
+ if (p->timeout_pos <= bump_to) {
+ timeout_timer(p);
+ tiw->nto--;
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ yield_count -= ERTS_TW_COST_TIMEOUT;
+ }
+ else {
+ /* uncommon case */
+ insert_timer_into_slot(tiw, slot, p);
+ yield_count -= ERTS_TW_COST_SLOT_MOVE;
+ }
+
+ restart_yielded_soon_slot:
p = tiw->sentinel.next;
if (p == &tiw->sentinel) {
@@ -482,12 +970,9 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
break;
}
- if (--yield_count <= 0) {
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- tiw->yield_slot = tiw_pos_ix;
+ if (yield_count <= 0) {
+ tiw->yield_slot = slot;
tiw->yield_slots_left = slots;
- tiw->yield_start_pos = old_pos;
ERTS_MSACC_POP_STATE_M_X();
return; /* Yield! */
}
@@ -496,24 +981,166 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
p->next->prev = &tiw->sentinel;
}
}
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- slots--;
+
+ scnt_soon_wheel_next(&slot, &slots, NULL, &scnt_ix, bump_scnt);
}
+
+ if (ERTS_TW_BUMP_LATER_WHEEL(tiw)) {
+ restart_yielded_later_slot:
+ if (bump_later_wheel(tiw, &yield_count))
+ return; /* Yield! */
+ }
}
- } while (yielded_slot_restarted);
+ } while (restarted);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ ERTS_TW_ASSERT(tiw->next_timeout_pos == bump_to);
- /* Search at most two seconds ahead... */
- (void) find_next_timeout(NULL, tiw, 0, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+ (void) find_next_timeout(NULL, tiw);
ERTS_MSACC_POP_STATE_M_X();
}
+static int
+bump_later_wheel(ErtsTimerWheel *tiw, int *ycount_p)
+{
+ ErtsMonotonicTime cpos = tiw->pos;
+ ErtsMonotonicTime later_pos = tiw->later.pos;
+ int ycount = *ycount_p;
+ int slots, fslot, scnt_ix;
+ Sint *scnt, *bump_scnt;
+
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ if (tiw->yield_slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ fslot = tiw->yield_slot;
+ scnt_ix = scnt_get_ix(fslot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_LATER_WHEEL_SIZE);
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ goto restart_yielded_slot;
+ }
+ else {
+ ErtsMonotonicTime end_later_pos, tmp_slots, min_tpos;
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+ end_later_pos = cpos + ERTS_TW_SOON_WHEEL_SIZE;
+ end_later_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ /* Skip known empty slots... */
+ if (min_tpos > later_pos) {
+ if (min_tpos > end_later_pos) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, end_later_pos);
+ tiw->later.pos = end_later_pos;
+ goto done;
+ }
+ later_pos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, later_pos);
+ }
+
+ tmp_slots = end_later_pos;
+ tmp_slots -= later_pos;
+ tmp_slots /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp_slots < ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+
+ fslot = later_slot(later_pos);
+ scnt_ix = scnt_get_ix(fslot);
+
+ tiw->later.pos = end_later_pos;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *p;
+
+ ycount -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[fslot];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[fslot] = NULL;
+
+ while (1) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+
+ ERTS_TW_ASSERT(p->slot == fslot);
+
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ scnt_ix_dec(scnt, scnt_ix);
+
+ if (tpos >= tiw->later.pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE) {
+ /* keep in later slot; very uncommon... */
+ insert_timer_into_slot(tiw, fslot, p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ ERTS_TW_ASSERT(tpos < cpos + ERTS_TW_SOON_WHEEL_SIZE);
+ if (tpos > cpos) {
+ /* move into soon wheel */
+ insert_timer_into_slot(tiw, soon_slot(tpos), p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ /* trigger at once */
+ timeout_timer(p);
+ tiw->nto--;
+ ycount -= ERTS_TW_COST_TIMEOUT;
+ }
+ }
+
+ restart_yielded_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (ycount < 0) {
+ tiw->yield_slot = fslot;
+ tiw->yield_slots_left = slots;
+ *ycount_p = 0;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+ return 1; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+
+ scnt_later_wheel_next(&fslot, &slots, NULL, &scnt_ix, bump_scnt);
+ }
+
+done:
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ *ycount_p = ycount;
+
+ return 0;
+}
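+
+/*
+ * A small worked example of the later wheel bump above (assumed
+ * sizes, for illustration): with a later slot size of 8 ticks, a
+ * timer set for position 100 lives in the later slot covering
+ * positions 96..103. When the later wheel position reaches that
+ * slot, the timer is moved into the soon wheel slot for position
+ * 100 (or triggered at once if position 100 has already passed);
+ * only in the uncommon case that its timeout is still a full later
+ * wheel revolution away is it kept in its later slot.
+ */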
+
Uint
erts_timer_wheel_memory_size(void)
{
@@ -526,25 +1153,51 @@ erts_create_timer_wheel(ErtsSchedulerData *esdp)
ErtsMonotonicTime mtime;
int i;
ErtsTimerWheel *tiw;
+
+ /* Some compile time sanity checks... */
+
+ /* Slots... */
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE == -1);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_INACTIVE < ERTS_TW_SLOT_AT_ONCE);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE + 1 == ERTS_TW_SOON_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT < ERTS_TW_SOON_WHEEL_END_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_END_SLOT == ERTS_TW_LATER_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ /* Both wheel sizes must be powers of 2 */
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_SIZE
+ && !(ERTS_TW_SOON_WHEEL_SIZE & (ERTS_TW_SOON_WHEEL_SIZE-1)));
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_SIZE
+ && !(ERTS_TW_LATER_WHEEL_SIZE & (ERTS_TW_LATER_WHEEL_SIZE-1)));
+
tiw = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_WHEEL,
sizeof(ErtsTimerWheel));
- for(i = 0; i < ERTS_TIW_SIZE; i++)
+ tiw->w = &tiw->slots[1];
+ for(i = ERTS_TW_SLOT_AT_ONCE; i < ERTS_TW_LATER_WHEEL_END_SLOT; i++)
tiw->w[i] = NULL;
+ for (i = 0; i < ERTS_TW_SCNT_SIZE; i++)
+ tiw->scnt[i] = 0;
+
mtime = erts_get_monotonic_time(esdp);
tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
tiw->nto = 0;
- tiw->at_once.head = NULL;
- tiw->at_once.tail = NULL;
tiw->at_once.nto = 0;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->soon.nto = 0;
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ tiw->later.pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->later.nto = 0;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = tiw->pos + ERTS_CLKTCKS_WEEK;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->sentinel.next = &tiw->sentinel;
tiw->sentinel.prev = &tiw->sentinel;
- tiw->sentinel.u.func.timeout = NULL;
- tiw->sentinel.u.func.cancel = NULL;
- tiw->sentinel.u.func.arg = NULL;
+ tiw->sentinel.timeout = NULL;
+ tiw->sentinel.arg = NULL;
return tiw;
}
@@ -577,53 +1230,56 @@ erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
void
erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos)
+ void *arg, ErtsMonotonicTime timeout_pos)
{
- ErtsMonotonicTime timeout_time;
+ int slot;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
- p->u.func.timeout = timeout;
- p->u.func.cancel = cancel;
- p->u.func.arg = arg;
+ p->timeout = timeout;
+ p->arg = arg;
+
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_INACTIVE);
- ERTS_TW_ASSERT(p->slot == ERTS_TWHEEL_SLOT_INACTIVE);
+ tiw->nto++;
+ /* calculate slot */
if (timeout_pos <= tiw->pos) {
- tiw->nto++;
- tiw->at_once.nto++;
- p->next = NULL;
- p->prev = tiw->at_once.tail;
- if (tiw->at_once.tail) {
- ERTS_TW_ASSERT(tiw->at_once.head);
- tiw->at_once.tail->next = p;
- }
- else {
- ERTS_TW_ASSERT(!tiw->at_once.head);
- tiw->at_once.head = p;
- }
- tiw->at_once.tail = p;
- p->timeout_pos = tiw->pos;
- p->slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->pos);
+ /* at once */
+ p->timeout_pos = timeout_pos = tiw->pos;
+ slot = ERTS_TW_SLOT_AT_ONCE;
+ }
+ else if (timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE) {
+ /* soon wheel */
+ p->timeout_pos = timeout_pos;
+ slot = soon_slot(timeout_pos);
+ if (tiw->soon.min_tpos > timeout_pos)
+ tiw->soon.min_tpos = timeout_pos;
}
else {
- int slot;
-
- /* calculate slot */
- slot = (int) (timeout_pos & (ERTS_TIW_SIZE-1));
-
- insert_timer_into_slot(tiw, slot, p);
-
- tiw->nto++;
-
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
- p->timeout_pos = timeout_pos;
+ /* later wheel */
+ p->timeout_pos = timeout_pos;
+ slot = later_slot(timeout_pos);
+
+ /*
+ * The next wakeup due to this timer
+ * should occur in good time before the
+ * actual timeout (one later wheel slot
+ * size earlier), so that the timer can
+ * be moved from the later wheel to the
+ * soon wheel.
+ */
+ timeout_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ timeout_pos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
}
- if (timeout_time < tiw->next_timeout_time) {
+ insert_timer_into_slot(tiw, slot, p);
+
+ if (timeout_pos <= tiw->next_timeout_pos) {
tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = timeout_time;
+ if (timeout_pos < tiw->next_timeout_pos) {
+ tiw->next_timeout_pos = timeout_pos;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ }
}
ERTS_MSACC_POP_STATE_M_X();
}
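
/*
 * Example of the pre-timeout above (illustrative slot size of 8
 * ticks): a timer set for position 100 is placed in the later wheel,
 * but the wakeup position recorded in next_timeout_pos becomes
 * (100 & ~7) - 8 = 88, so the wheel is bumped early enough to move
 * the timer into the soon wheel before it actually fires.
 */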
@@ -631,15 +1287,10 @@ erts_twheel_set_timer(ErtsTimerWheel *tiw,
void
erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
- if (p->slot != ERTS_TWHEEL_SLOT_INACTIVE) {
- ErlCancelProc cancel;
- void *arg;
+ if (p->slot != ERTS_TW_SLOT_INACTIVE) {
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
remove_timer(tiw, p);
- cancel = p->u.func.cancel;
- arg = p->u.func.arg;
- if (cancel)
- (*cancel)(arg);
+ tiw->nto--;
ERTS_MSACC_POP_STATE_M_X();
}
}
@@ -657,22 +1308,17 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
tmr = tiw->sentinel.next;
while (tmr != &tiw->sentinel) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
}
- for (tmr = tiw->at_once.head; tmr; tmr = tmr->next) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
- }
-
- for (ix = 0; ix < ERTS_TIW_SIZE; ix++) {
+ for (ix = ERTS_TW_SLOT_AT_ONCE; ix < ERTS_TW_LATER_WHEEL_END_SLOT; ix++) {
tmr = tiw->w[ix];
if (tmr) {
do {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
} while (tmr != tiw->w[ix]);
}
@@ -680,35 +1326,206 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
}
#ifdef ERTS_TW_DEBUG
-void erts_p_slpq(void)
+
+void
+dbg_verify_empty_soon_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
{
- erts_printf("Not yet implemented...\n");
-#if 0
- ErtsMonotonicTime current_time = erts_get_monotonic_time(NULL);
- int i;
- ErtsTWheelTimer* p;
-
- /* print the whole wheel, starting at the current position */
- erts_printf("\ncurrent time = %bps tiw_pos = %d tiw_nto %d\n",
- current_time, tiw->pos, tiw->nto);
- i = tiw->pos;
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos),
- p->slot);
- }
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = soon_slot(tiw->pos);
+ tmp = to_pos;
+ if (tmp > tiw->pos) {
+ int slots;
+ tmp -= tiw->pos;
+ ERTS_TW_ASSERT(tmp > 0);
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+
+ while (slots > 0) {
+ ERTS_TW_ASSERT(!tiw->w[ix]);
+ ix++;
+ if (ix == ERTS_TW_SOON_WHEEL_END_SLOT)
+ ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+void
+dbg_verify_empty_later_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
+{
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = later_slot(tiw->later.pos);
+ tmp = to_pos;
+ tmp &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ if (tmp > tiw->later.pos) {
+ ErtsMonotonicTime pos_min;
+ int slots;
+ tmp -= tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(tmp > 0);
+
+ pos_min = tiw->later.pos;
+
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp;
+ else {
+ pos_min += ((tmp / ERTS_TW_LATER_WHEEL_SIZE)
+ * ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *tmr = tiw->w[ix];
+ pos_min += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmr) {
+ ErtsTWheelTimer *end = tmr;
+ do {
+ ERTS_TW_ASSERT(tmr->timeout_pos >= pos_min);
+ tmr = tmr->next;
+ } while (tmr != end);
+ }
+ ix++;
+ if (ix == ERTS_TW_LATER_WHEEL_END_SLOT)
+ ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+#endif /* ERTS_TW_DEBUG */
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static void
+hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos)
+{
+ int ix, six, soon_tmo, later_tmo, at_once_tmo,
+ scnt_slot, scnt_slots, scnt_six;
+ ErtsMonotonicTime min_tpos;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ ErtsTWheelTimer *p;
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ scnt[six] = 0;
+
+ min_tpos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+
+ at_once_tmo = 0;
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ at_once_tmo++;
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_AT_ONCE);
+ ERTS_TW_ASSERT(p->timeout_pos <= tiw->pos);
+ ERTS_TW_ASSERT(!check_min_tpos || tiw->pos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
}
- for(i = ((i+1) & (ERTS_TIW_SIZE-1)); i != (tiw->pos & (ERTS_TIW_SIZE-1)); i = ((i+1) & (ERTS_TIW_SIZE-1))) {
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos), p->slot);
- }
- }
+
+ soon_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_SOON_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ soon_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(ix == soon_slot(tpos));
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
}
-#endif
+
+ later_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_LATER_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ six = scnt_get_ix(ix);
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ later_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(later_slot(tpos) == ix);
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ }
+
+ if (tiw->yield_slot != ERTS_TW_SLOT_INACTIVE) {
+ p = tiw->sentinel.next;
+ ix = tiw->yield_slot;
+ while (p != &tiw->sentinel) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ ERTS_TW_ASSERT(ix == p->slot);
+ if (ix == ERTS_TW_SLOT_AT_ONCE)
+ at_once_tmo++;
+ else {
+ scnt_inc(scnt, ix);
+ if (ix >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ later_tmo++;
+ ERTS_TW_ASSERT(ix == later_slot(tpos));
+ }
+ else {
+ soon_tmo++;
+ ERTS_TW_ASSERT(ix == (tpos & ERTS_TW_SOON_WHEEL_MASK));
+ ERTS_TW_ASSERT(tpos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ }
+ }
+ p = p->next;
+ }
+ }
+
+ ERTS_TW_ASSERT(tiw->at_once.nto == at_once_tmo);
+ ERTS_TW_ASSERT(tiw->soon.nto == soon_tmo);
+ ERTS_TW_ASSERT(tiw->later.nto == later_tmo);
+ ERTS_TW_ASSERT(tiw->nto == soon_tmo + later_tmo + at_once_tmo);
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ ERTS_TW_ASSERT(scnt[six] == tiw->scnt[six]);
}
-#endif /* ERTS_TW_DEBUG */
+
+#endif /* ERTS_TW_HARD_DEBUG */
diff --git a/erts/emulator/beam/trace_instrs.tab b/erts/emulator/beam/trace_instrs.tab
new file mode 100644
index 0000000000..3eee81c053
--- /dev/null
+++ b/erts/emulator/beam/trace_instrs.tab
@@ -0,0 +1,168 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+return_trace() {
+ ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]);
+
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[2]));
+ E += 3;
+ Goto(*I);
+ //| -no_next
+}
+
+i_generic_breakpoint() {
+ BeamInstr real_I;
+ HEAVY_SWAPOUT;
+ real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg);
+ HEAVY_SWAPIN;
+ ASSERT(VALID_INSTR(real_I));
+ Goto(real_I);
+ //| -no_next
+}
+
+i_return_time_trace() {
+ BeamInstr *pc = (BeamInstr *) (UWord) E[0];
+ SWAPOUT;
+ erts_trace_time_return(c_p, erts_code_to_codeinfo(pc));
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[1]));
+ E += 2;
+ Goto(*I);
+ //| -no_next
+}
+
+i_return_to_trace() {
+ if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
+ Uint *cpp = (Uint*) E;
+ for(;;) {
+ ASSERT(is_CP(*cpp));
+ if (IsOpCode(*cp_val(*cpp), return_trace)) {
+ do
+ ++cpp;
+ while (is_not_CP(*cpp));
+ cpp += 2;
+ } else if (IsOpCode(*cp_val(*cpp), i_return_to_trace)) {
+ do
+ ++cpp;
+ while (is_not_CP(*cpp));
+ } else {
+ break;
+ }
+ }
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return_to(c_p, cp_val(*cpp));
+ ERTS_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ }
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[0]));
+ E += 1;
+ Goto(*I);
+ //| -no_next
+}
+
+i_yield() {
+ /* This is safe as long as REDS_IN(c_p) is never stored
+ * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5].
+ */
+ c_p->arg_reg[0] = am_true;
+ c_p->arity = 1; /* One living register (the 'true' return value) */
+ SWAPOUT;
+ $SET_CP_I_ABS($NEXT_INSTRUCTION);
+ c_p->current = NULL;
+ goto do_schedule;
+ //| -no_next
+}
+
+i_hibernate() {
+ HEAVY_SWAPOUT;
+ if (erts_hibernate(c_p, reg)) {
+ FCALLS = c_p->fcalls;
+ c_p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ } else {
+ HEAVY_SWAPIN;
+ I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa);
+ goto post_error_handling;
+ }
+ //| -no_next
+}
+
+// This is optimised as an instruction because
+// it has to be very very fast.
+
+i_perf_counter() {
+ ErtsSysPerfCounter ts;
+
+ ts = erts_sys_perf_counter();
+ if (IS_SSMALL(ts)) {
+ r(0) = make_small((Sint)ts);
+ } else {
+ $GC_TEST(0, ERTS_SINT64_HEAP_SIZE(ts), 0);
+ r(0) = make_big(HTOP);
+#if defined(ARCH_32)
+ if (ts >= (((Uint64) 1) << 32)) {
+ *HTOP = make_pos_bignum_header(2);
+ BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
+ BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff));
+ HTOP += 3;
+ }
+ else
+#endif
+ {
+ *HTOP = make_pos_bignum_header(1);
+ BIG_DIGIT(HTOP, 0) = (Uint) ts;
+ HTOP += 2;
+ }
+ }
+}
+
+i_debug_breakpoint() {
+ HEAVY_SWAPOUT;
+ I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint);
+ HEAVY_SWAPIN;
+ if (I) {
+ Goto(*I);
+ }
+ goto handle_error;
+ //| -no_next
+}
+
+//
+// Special jump instruction used for tracing. Takes an absolute
+// failure address.
+//
+
+trace_jump(Fail) {
+ //| -no_next
+ SET_I((BeamInstr *) $Fail);
+ Goto(*I);
+}
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 6786657faf..d81bd89a48 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
-#include "erl_smp.h"
+#include "erl_lock_count.h"
#include "erl_time.h"
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
@@ -51,11 +51,15 @@
#include "erl_ptab.h"
#include "erl_check_io.h"
#include "erl_bif_unique.h"
+#include "erl_io_queue.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#ifdef HIPE
# include "hipe_mode_switch.h"
#endif
+#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
+#include "erl_nfunc_sched.h"
+#include "erl_proc_sig_queue.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -136,7 +140,7 @@ Eterm*
erts_set_hole_marker(Eterm* ptr, Uint sz)
{
Eterm* p = ptr;
- int i;
+ Uint i;
for (i = 0; i < sz; i++) {
*p++ = ERTS_HOLE_MARKER;
@@ -204,9 +208,8 @@ erl_grow_wstack(ErtsWStack* s, Uint need)
void
erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
{
- Uint old_size = s->pend - s->pstart;
+ Uint old_size = s->size;
Uint new_size;
- Uint sp_offs = s->psp - s->pstart;
if (need_bytes < old_size)
new_size = 2 * old_size;
@@ -220,8 +223,7 @@ erl_grow_pstack(ErtsPStack* s, void* default_pstack, unsigned need_bytes)
sys_memcpy(new_ptr, s->pstart, old_size);
s->pstart = new_ptr;
}
- s->pend = s->pstart + new_size;
- s->psp = s->pstart + sp_offs;
+ s->size = new_size;
}
/*
@@ -348,40 +350,41 @@ int erts_fit_in_bits_uint(Uint value)
}
int
-erts_print(int to, void *arg, char *format, ...)
+erts_print(fmtfn_t to, void *arg, char *format, ...)
{
int res;
va_list arg_list;
va_start(arg_list, format);
- if (to < ERTS_PRINT_MIN)
- res = -EINVAL;
- else {
- switch (to) {
- case ERTS_PRINT_STDOUT:
+ {
+ switch ((UWord)to) {
+ case (UWord)ERTS_PRINT_STDOUT:
res = erts_vprintf(format, arg_list);
break;
- case ERTS_PRINT_STDERR:
+ case (UWord)ERTS_PRINT_STDERR:
res = erts_vfprintf(stderr, format, arg_list);
break;
- case ERTS_PRINT_FILE:
+ case (UWord)ERTS_PRINT_FILE:
res = erts_vfprintf((FILE *) arg, format, arg_list);
break;
- case ERTS_PRINT_SBUF:
+ case (UWord)ERTS_PRINT_SBUF:
res = erts_vsprintf((char *) arg, format, arg_list);
break;
- case ERTS_PRINT_SNBUF:
+ case (UWord)ERTS_PRINT_SNBUF:
res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
((erts_print_sn_buf *) arg)->size,
format,
arg_list);
break;
- case ERTS_PRINT_DSBUF:
+ case (UWord)ERTS_PRINT_DSBUF:
res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
break;
- default:
- res = erts_vfdprintf((int) to, format, arg_list);
+ case (UWord)ERTS_PRINT_FD:
+ res = erts_vfdprintf((int)(SWord) arg, format, arg_list);
break;
+ default:
+ res = erts_vcbprintf(to, arg, format, arg_list);
+ break;
}
}
@@ -390,7 +393,7 @@ erts_print(int to, void *arg, char *format, ...)
}
int
-erts_putc(int to, void *arg, char c)
+erts_putc(fmtfn_t to, void *arg, char c)
{
return erts_print(to, arg, "%c", c);
}
@@ -699,12 +702,7 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
/* make a hash index from an erlang term */
/*
-** There are three hash functions.
-** make_broken_hash: the one used for backward compatibility
-** is called from the bif erlang:hash/2. Should never be used
-** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
-** c) hashes bignums differently on different endian processors and d) hashes
-** small integers with different weights on different bytes.
+** There are two hash functions.
**
** make_hash: A hash function that will give the same values for the same
** terms regardless of the internal representation. Small integers are
@@ -893,11 +891,11 @@ tail_recur:
{
Export* ep = *((Export **) (export_val(term) + 1));
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
+ hash = hash * FUNNY_NUMBER11 + ep->info.mfa.arity;
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue);
hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue);
break;
}
@@ -1044,6 +1042,10 @@ tail_recur:
DESTROY_WSTACK(stack);
return hash;
+#undef MAKE_HASH_TUPLE_OP
+#undef MAKE_HASH_TERM_ARRAY_OP
+#undef MAKE_HASH_CDR_PRE_OP
+#undef MAKE_HASH_CDR_POST_OP
#undef UINT32_HASH_STEP
#undef UINT32_HASH_RET
}
@@ -1330,11 +1332,11 @@ make_hash2(Eterm term)
{
Export* ep = *((Export **) (export_val(term) + 1));
UINT32_HASH_2
- (ep->code[2],
- atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
+ (ep->info.mfa.arity,
+ atom_tab(atom_val(ep->info.mfa.module))->slot.bucket.hvalue,
HCONST);
UINT32_HASH
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
+ (atom_tab(atom_val(ep->info.mfa.function))->slot.bucket.hvalue,
HCONST_14);
goto hash2_common;
}
@@ -1578,16 +1580,13 @@ do { /* Lightweight mixing of constant (type info) */ \
} while (0)
Uint32
-make_internal_hash(Eterm term)
+make_internal_hash(Eterm term, Uint32 salt)
{
Uint32 hash;
- Uint32 hash_xor_pairs;
-
- ERTS_UNDEF(hash_xor_pairs, 0);
/* Optimization. Simple cases before declaration of estack. */
if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
- hash = 0;
+ hash = salt;
#if ERTS_SIZEOF_ETERM == 8
UINT32_HASH_2((Uint32)term, (Uint32)(term >> 32), HCONST);
#elif ERTS_SIZEOF_ETERM == 4
@@ -1601,7 +1600,7 @@ make_internal_hash(Eterm term)
Eterm tmp;
DECLARE_ESTACK(s);
- hash = 0;
+ hash = salt;
for (;;) {
switch (primary_tag(term)) {
case TAG_PRIMARY_LIST:
@@ -1664,6 +1663,11 @@ make_internal_hash(Eterm term)
Eterm* ptr = boxed_val(term) + 1;
Uint size;
int i;
+
+ /*
+ * We rely on key-value iteration order being constant
+ * for identical maps (in this VM instance).
+ */
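+ /*
+ * (Flatmaps store their keys in a canonical sorted order, and the
+ * HAMT layout is a function of the keys' internal hash values, so
+ * within one VM instance two identical maps iterate identically.)
+ */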
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
case HAMT_SUBTAG_HEAD_FLATMAP:
{
@@ -1675,26 +1679,10 @@ make_internal_hash(Eterm term)
if (size == 0)
goto pop_next;
- /* We want a hash function that is *independent* of
- * the order in which keys and values are encountered.
- * We therefore calculate context independent hashes for all .
- * key-value pairs and then xor them together.
- *
- * We *do* need to use an initial seed for each pair, i.e. the
- * hash value, so the hash value is reset for each pair with 'hash'.
- * If we don't, no additional entropy is given to the system and the
- * hash collision resolution in maps:from_list/1 would fail.
- */
- ESTACK_PUSH(s, hash_xor_pairs);
- ESTACK_PUSH(s, hash);
- ESTACK_PUSH(s, HASH_MAP_TAIL);
for (i = size - 1; i >= 0; i--) {
- ESTACK_PUSH(s, hash); /* initial seed for all pairs */
- ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, vs[i]);
ESTACK_PUSH(s, ks[i]);
}
- hash_xor_pairs = 0;
goto pop_next;
}
case HAMT_SUBTAG_HEAD_ARRAY:
@@ -1703,10 +1691,6 @@ make_internal_hash(Eterm term)
UINT32_HASH(size, HCONST_16);
if (size == 0)
goto pop_next;
- ESTACK_PUSH(s, hash_xor_pairs);
- ESTACK_PUSH(s, hash);
- ESTACK_PUSH(s, HASH_MAP_TAIL);
- hash_xor_pairs = 0;
}
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
case HAMT_SUBTAG_HEAD_ARRAY:
@@ -1722,8 +1706,6 @@ make_internal_hash(Eterm term)
while (i) {
if (is_list(*ptr)) {
Eterm* cons = list_val(*ptr);
- ESTACK_PUSH(s, hash); /* initial seed for all pairs */
- ESTACK_PUSH(s, HASH_MAP_PAIR);
ESTACK_PUSH(s, CDR(cons));
ESTACK_PUSH(s, CAR(cons));
}
@@ -1739,7 +1721,7 @@ make_internal_hash(Eterm term)
case EXPORT_SUBTAG:
{
Export* ep = *((Export **) (export_val(term) + 1));
- /* Assumes Export entries never moves */
+ /* Assumes Export entries never move */
POINTER_HASH(ep, HCONST_14);
goto pop_next;
}
@@ -1838,7 +1820,7 @@ make_internal_hash(Eterm term)
break;
case REF_SUBTAG:
UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
- ASSERT(internal_ref_no_of_numbers(term) == 3);
+ ASSERT(internal_ref_no_numbers(term) == 3);
UINT32_HASH_2(internal_ref_numbers(term)[1],
internal_ref_numbers(term)[2], HCONST_8);
goto pop_next;
@@ -1847,7 +1829,7 @@ make_internal_hash(Eterm term)
{
ExternalThing* thing = external_thing_ptr(term);
- ASSERT(external_thing_ref_no_of_numbers(thing) == 3);
+ ASSERT(external_thing_ref_no_numbers(thing) == 3);
/* See limitation #2 */
#ifdef ARCH_64
POINTER_HASH(thing->node, HCONST_7);
@@ -1894,7 +1876,7 @@ make_internal_hash(Eterm term)
goto pop_next;
}
default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
}
}
break;
@@ -1907,7 +1889,7 @@ make_internal_hash(Eterm term)
goto pop_next;
default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term);
+ erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_internal_hash(0x%X, %lu)\n", term, salt);
pop_next:
if (ESTACK_ISEMPTY(s)) {
@@ -1919,17 +1901,6 @@ make_internal_hash(Eterm term)
term = ESTACK_POP(s);
switch (term) {
- case HASH_MAP_TAIL: {
- hash = (Uint32) ESTACK_POP(s);
- UINT32_HASH(hash_xor_pairs, HCONST_19);
- hash_xor_pairs = (Uint32) ESTACK_POP(s);
- goto pop_next;
- }
- case HASH_MAP_PAIR:
- hash_xor_pairs ^= hash;
- hash = (Uint32) ESTACK_POP(s); /* initial seed for all pairs */
- goto pop_next;
-
case HASH_CDR:
CONST_HASH(HCONST_18); /* Hash CDR in cons cell */
goto pop_next;
@@ -1953,437 +1924,164 @@ make_internal_hash(Eterm term)
#undef HCONST
#undef MIX
-
-Uint32 make_broken_hash(Eterm term)
-{
- Uint32 hash = 0;
- DECLARE_WSTACK(stack);
- unsigned op;
-tail_recur:
- op = tag_val_def(term);
- for (;;) {
- switch (op) {
- case NIL_DEF:
- hash = hash*FUNNY_NUMBER3 + 1;
- break;
- case ATOM_DEF:
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(term))->slot.bucket.hvalue);
- break;
- case SMALL_DEF:
-#if defined(ARCH_64)
- {
- Sint y1 = signed_val(term);
- Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
- Uint32 y3 = (Uint32) (y2 >> 32);
- int arity = 1;
-
-#if defined(WORDS_BIGENDIAN)
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- Uint32 y4 = (Uint32) y2;
- hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
- if (y3) {
- hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#else
- if (!IS_SSMALL28(y1))
- { /* like a bignum */
- hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
- if (y3)
- {
- hash = hash*FUNNY_NUMBER2 + y3;
- arity++;
- }
- hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- } else {
- hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
- }
-#endif
- }
-#else
- hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
-#endif
- break;
-
- case BINARY_DEF:
- {
- size_t sz = binary_size(term);
- size_t i = (sz < 15) ? sz : 15;
-
- hash = hash_binary_bytes(term, i, hash);
- hash = hash*FUNNY_NUMBER4 + sz;
- break;
- }
-
- case EXPORT_DEF:
- {
- Export* ep = *((Export **) (export_val(term) + 1));
-
- hash = hash * FUNNY_NUMBER11 + ep->code[2];
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
- break;
- }
-
- case FUN_DEF:
- {
- ErlFunThing* funp = (ErlFunThing *) fun_val(term);
- Uint num_free = funp->num_free;
-
- hash = hash * FUNNY_NUMBER10 + num_free;
- hash = hash*FUNNY_NUMBER1 +
- (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
- hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
- if (num_free > 0) {
- if (num_free > 1) {
- WSTACK_PUSH3(stack, (UWord) &funp->env[1], (num_free-1), MAKE_HASH_TERM_ARRAY_OP);
- }
- term = funp->env[0];
- goto tail_recur;
- }
- break;
- }
-
- case PID_DEF:
- hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
- break;
- case EXTERNAL_PID_DEF:
- hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
- break;
- case PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
- break;
- case EXTERNAL_PORT_DEF:
- hash = hash*FUNNY_NUMBER9 + external_port_number(term);
- break;
- case REF_DEF:
- hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
- break;
- case EXTERNAL_REF_DEF:
- hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
- break;
- case FLOAT_DEF:
- {
- FloatDef ff;
- GET_DOUBLE(term, ff);
- if (ff.fd == 0.0f) {
- /* ensure positive 0.0 */
- ff.fd = erts_get_positive_zero_float();
- }
- hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
- }
- break;
- case MAKE_HASH_CDR_PRE_OP:
- term = (Eterm) WSTACK_POP(stack);
- if (is_not_list(term)) {
- WSTACK_PUSH(stack, (UWord) MAKE_HASH_CDR_POST_OP);
- goto tail_recur;
- }
- /*fall through*/
- case LIST_DEF:
- {
- Eterm* list = list_val(term);
- WSTACK_PUSH2(stack, (UWord) CDR(list),
- (UWord) MAKE_HASH_CDR_PRE_OP);
- term = CAR(list);
- goto tail_recur;
- }
-
- case MAKE_HASH_CDR_POST_OP:
- hash *= FUNNY_NUMBER8;
- break;
-
- case BIG_DEF:
- {
- Eterm* ptr = big_val(term);
- int is_neg = BIG_SIGN(ptr);
- Uint arity = BIG_ARITY(ptr);
- Uint i = arity;
- ptr++;
-#if D_EXP == 16
- /* hash over 32 bit LE */
-
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#elif D_EXP == 32
-
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
- }
-#else
- while(i--) {
- hash = hash*FUNNY_NUMBER2 + *ptr++;
- }
-#endif
-
-#elif D_EXP == 64
- {
- Uint32 h = 0, l;
-#if defined(WORDS_BIGENDIAN)
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
- }
-#else
- while(i--) {
- Uint d = *ptr++;
- l = d & 0xffffffff;
- h = d >> 32;
- hash = hash*FUNNY_NUMBER2 + l;
- if (h || i)
- hash = hash*FUNNY_NUMBER2 + h;
- }
-#endif
- /* adjust arity to match 32 bit mode */
- arity = (arity << 1) - (h == 0);
- }
-
-#else
-#error "unsupported D_EXP size"
-#endif
- hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
- }
- break;
-
- case MAP_DEF:
- hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term);
- break;
- case TUPLE_DEF:
- {
- Eterm* ptr = tuple_val(term);
- Uint arity = arityval(*ptr);
-
- WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity);
- op = MAKE_HASH_TUPLE_OP;
- }/*fall through*/
- case MAKE_HASH_TUPLE_OP:
- case MAKE_HASH_TERM_ARRAY_OP:
- {
- Uint i = (Uint) WSTACK_POP(stack);
- Eterm* ptr = (Eterm*) WSTACK_POP(stack);
- if (i != 0) {
- term = *ptr;
- WSTACK_PUSH3(stack, (UWord)(ptr+1), (UWord) i-1, (UWord) op);
- goto tail_recur;
- }
- if (op == MAKE_HASH_TUPLE_OP) {
- Uint32 arity = (UWord) WSTACK_POP(stack);
- hash = hash*FUNNY_NUMBER9 + arity;
- }
- break;
- }
-
- default:
- erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_broken_hash\n");
- return 0;
- }
- if (WSTACK_ISEMPTY(stack)) break;
- op = (Uint) WSTACK_POP(stack);
- }
-
- DESTROY_WSTACK(stack);
- return hash;
-
-#undef MAKE_HASH_TUPLE_OP
-#undef MAKE_HASH_TERM_ARRAY_OP
-#undef MAKE_HASH_CDR_PRE_OP
-#undef MAKE_HASH_CDR_POST_OP
-}
-
+/* error_logger !
+ {log, Level, Format, [Args], #{ gl, pid, time, error_logger => #{ tag, emulator => true } }}
+*/
static Eterm
-do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
- ErlHeapFragment **bp, Process **p, Uint sz)
+do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
+ Eterm **hp, ErlOffHeap **ohp,
+ ErlHeapFragment **bp, Uint sz)
{
Uint gl_sz;
gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
- sz = sz + gl_sz;
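+ /* an arity-5 tuple needs 6 heap words: 1 header + 5 elements */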
+ sz = sz + gl_sz + 6 /* outer 5-tuple */
+ + MAP2_SZ /* error_logger map */;
-#ifndef ERTS_SMP
-#ifdef USE_THREADS
- if (!erts_get_scheduler_data()) /* Must be scheduler thread */
- *p = NULL;
+ *pid = erts_get_current_pid();
+
+ if (is_nil(gleader) && is_non_value(*pid)) {
+ sz += MAP2_SZ /* metadata map no gl, no pid */;
+ } else if (is_nil(gleader) || is_non_value(*pid))
+ sz += MAP3_SZ /* metadata map, no gl or no pid */;
else
-#endif
- {
- *p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
- if (*p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&(*p)->state);
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
- *p = NULL;
- }
- }
+ sz += MAP4_SZ /* metadata map w gl, w pid */;
- if (!*p) {
- return NIL;
- }
+ *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL) + erts_get_time_offset());
+ erts_bld_sint64(NULL, &sz, *ts);
- /* So we have an error logger, lets build the message */
-#endif
*bp = new_message_buffer(sz);
*ohp = &(*bp)->off_heap;
*hp = (*bp)->mem;
- return (is_nil(gleader)
- ? am_noproc
- : (IS_CONST(gleader)
- ? gleader
- : copy_struct(gleader,gl_sz,hp,*ohp)));
+ return copy_struct(gleader,gl_sz,hp,*ohp);
}
-static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
- Process *p, Eterm message)
+static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
+ ErtsMonotonicTime ts, Eterm pid,
+ Eterm *hp, ErlHeapFragment *bp)
{
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "%T\n", message);
-#endif
-#ifdef ERTS_SMP
- {
- Eterm from = erts_get_current_pid();
- if (is_not_internal_pid(from))
- from = NIL;
- erts_queue_error_logger_message(from, message, bp);
+ Eterm message, md, el_tag = tag;
+ Eterm time = erts_bld_sint64(&hp, NULL, ts);
+
+ /* This mapping is needed for the backwards-compatible error_logger */
+ switch (tag) {
+ case am_info: el_tag = am_info_msg; break;
+ case am_warning: el_tag = am_warning_msg; break;
+ default:
+ ASSERT(tag == am_error);
+ break;
}
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(p, 0, mp, message, am_system);
+
+ md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag);
+ hp += MAP2_SZ;
+
+ if (is_nil(gl) && is_non_value(pid)) {
+ /* no gl and no pid, probably from a port */
+ md = MAP2(hp,
+ am_error_logger, md,
+ am_time, time);
+ hp += MAP2_SZ;
+ pid = NIL;
+ } else if (is_nil(gl)) {
+ /* no gl */
+ md = MAP3(hp,
+ am_error_logger, md,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP3_SZ;
+ } else if (is_non_value(pid)) {
+ /* no pid */
+ md = MAP3(hp,
+ am_error_logger, md,
+ ERTS_MAKE_AM("gl"), gl,
+ am_time, time);
+ hp += MAP3_SZ;
+ pid = NIL;
+ } else {
+ md = MAP4(hp,
+ am_error_logger, md,
+ ERTS_MAKE_AM("gl"), gl,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP4_SZ;
}
-#endif
+
+ message = TUPLE5(hp, am_log, tag, format, args, md);
+ erts_queue_error_logger_message(pid, message, bp);
}
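For orientation, the term the rewritten path queues to the logger, in the same notation as the comment above (a sketch for the full-metadata case; Gl, Pid, Usec and Chars are placeholders, and map field order is irrelevant):

    {log, error, "~s~n", [Chars],
         #{error_logger => #{emulator => true, tag => error},
           gl => Gl, pid => Pid, time => Usec}}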
-/* error_logger !
- {notify,{info_msg,gleader,{emulator,format,[args]}}} |
- {notify,{error,gleader,{emulator,format,[args]}}} |
- {notify,{warning_msg,gleader,{emulator,format,[args}]}} */
-static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
{
Uint sz;
- Eterm gl;
- Eterm list,args,format,tuple1,tuple2,tuple3;
+ Eterm list, args, format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
-
- ASSERT(is_atom(tag));
-
- if (len <= 0) {
- return -1;
- }
sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
+ 8 /* "~s~n" */;
/* gleader size is accounted for and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
- tag, buf);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
list = buf_to_intlist(&hp, buf, len, NIL);
args = CONS(hp,list,NIL);
hp += 2;
format = buf_to_intlist(&hp, "~s~n", 4, NIL);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
-static int do_send_term_to_logger(Eterm tag, Eterm gleader,
- char *buf, int len, Eterm args)
+static int do_send_term_to_logger(Eterm tag, Eterm gl,
+ char *buf, size_t len, Eterm args)
{
Uint sz;
- Eterm gl;
Uint args_sz;
- Eterm format,tuple1,tuple2,tuple3;
+ Eterm format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
- ASSERT(is_atom(tag));
+ ASSERT(len > 0);
args_sz = size_object(args);
- sz = len * 2 /* format */ + args_sz
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+ sz = len * 2 /* format */ + args_sz;
/* gleader size is accounted for and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
- tag, buf, args);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
format = buf_to_intlist(&hp, buf, len, NIL);
args = copy_struct(args, args_sz, &hp, ohp);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
static ERTS_INLINE int
-send_info_to_logger(Eterm gleader, char *buf, int len)
+send_info_to_logger(Eterm gleader, char *buf, size_t len)
{
- return do_send_to_logger(am_info_msg, gleader, buf, len);
+ return do_send_to_logger(am_info, gleader, buf, len);
}
static ERTS_INLINE int
-send_warning_to_logger(Eterm gleader, char *buf, int len)
+send_warning_to_logger(Eterm gleader, char *buf, size_t len)
{
- Eterm tag;
- switch (erts_error_logger_warnings) {
- case am_info: tag = am_info_msg; break;
- case am_warning: tag = am_warning_msg; break;
- default: tag = am_error; break;
- }
- return do_send_to_logger(tag, gleader, buf, len);
+ return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
}
static ERTS_INLINE int
-send_error_to_logger(Eterm gleader, char *buf, int len)
+send_error_to_logger(Eterm gleader, char *buf, size_t len)
{
return do_send_to_logger(am_error, gleader, buf, len);
}
static ERTS_INLINE int
-send_error_term_to_logger(Eterm gleader, char *buf, int len, Eterm args)
+send_error_term_to_logger(Eterm gleader, char *buf, size_t len, Eterm args)
{
return do_send_term_to_logger(am_error, gleader, buf, len, args);
}
@@ -2739,22 +2437,20 @@ tailrecur_ne:
anum = external_thing_ref_numbers(athing);
bnum = external_thing_ref_numbers(bthing);
- alen = external_thing_ref_no_of_numbers(athing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ alen = external_thing_ref_no_numbers(athing);
+ blen = external_thing_ref_no_numbers(bthing);
goto ref_common;
+
case REF_SUBTAG:
- if (!is_internal_ref(b))
- goto not_equal;
- {
- RefThing* athing = ref_thing_ptr(a);
- RefThing* bthing = ref_thing_ptr(b);
- alen = internal_thing_ref_no_of_numbers(athing);
- blen = internal_thing_ref_no_of_numbers(bthing);
- anum = internal_thing_ref_numbers(athing);
- bnum = internal_thing_ref_numbers(bthing);
- }
+ if (!is_internal_ref(b))
+ goto not_equal;
+
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
ref_common:
ASSERT(alen > 0 && blen > 0);
@@ -2919,27 +2615,6 @@ not_equal:
}
-/*
- * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
- *
- * s1 < s2 return -1
- * s1 = s2 return 0
- * s1 > s2 return +1
- */
-static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
-{
- int i;
- i = 0;
- while((i < l1) && (i < l2)) {
- if (s1[i] < s2[i]) return(-1);
- if (s1[i] > s2[i]) return(1);
- i++;
- }
- if (l1 < l2) return(-1);
- if (l1 > l2) return(1);
- return(0);
-}
-
/*
* Compare objects.
@@ -2953,20 +2628,6 @@ static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
*
*/
-
-#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
-
-int erts_cmp_atoms(Eterm a, Eterm b)
-{
- Atom *aa = atom_tab(atom_val(a));
- Atom *bb = atom_tab(atom_val(b));
- int diff = aa->ord0 - bb->ord0;
- if (diff)
- return diff;
- return cmpbytes(aa->name+3, aa->len-3,
- bb->name+3, bb->len-3);
-}
-
/* cmp(Eterm a, Eterm b)
* For compatibility with HiPE - arith-based compare.
*/
@@ -2977,22 +2638,6 @@ Sint cmp(Eterm a, Eterm b)
Sint erts_cmp_compound(Eterm a, Eterm b, int exact, int eq_only);
-Sint erts_cmp(Eterm a, Eterm b, int exact, int eq_only)
-{
- if (is_atom(a) && is_atom(b)) {
- return erts_cmp_atoms(a, b);
- } else if (is_both_small(a, b)) {
- return (signed_val(a) - signed_val(b));
- } else if (is_float(a) && is_float(b)) {
- FloatDef af, bf;
- GET_DOUBLE(a, af);
- GET_DOUBLE(b, bf);
- return float_comp(af.fd, bf.fd);
- }
- return erts_cmp_compound(a,b,exact,eq_only);
-}
-
-
/* erts_cmp(Eterm a, Eterm b, int exact)
* exact = 1 -> term-based compare
* exact = 0 -> arith-based compare
@@ -3289,7 +2934,7 @@ tailrecur_ne:
GET_DOUBLE(a, af);
GET_DOUBLE(b, bf);
- ON_CMP_GOTO(float_comp(af.fd, bf.fd));
+ ON_CMP_GOTO(erts_float_comp(af.fd, bf.fd));
}
case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
@@ -3306,13 +2951,15 @@ tailrecur_ne:
Export* a_exp = *((Export **) (export_val(a) + 1));
Export* b_exp = *((Export **) (export_val(b) + 1));
- if ((j = erts_cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.module,
+ b_exp->info.mfa.module)) != 0) {
RETURN_NEQ(j);
}
- if ((j = erts_cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
+ if ((j = erts_cmp_atoms(a_exp->info.mfa.function,
+ b_exp->info.mfa.function)) != 0) {
RETURN_NEQ(j);
}
- ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
+ ON_CMP_GOTO((Sint) a_exp->info.mfa.arity - (Sint) b_exp->info.mfa.arity);
}
break;
case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
@@ -3324,10 +2971,7 @@ tailrecur_ne:
ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
Sint diff;
- diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
- atom_tab(atom_val(f1->fe->module))->len,
- atom_tab(atom_val(f2->fe->module))->name,
- atom_tab(atom_val(f2->fe->module))->len);
+ diff = erts_cmp_atoms((f1->fe)->module, (f2->fe)->module);
if (diff != 0) {
RETURN_NEQ(diff);
}
@@ -3384,25 +3028,21 @@ tailrecur_ne:
*/
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if(is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = REF_DEF;
goto mixed_types;
}
- {
- RefThing* athing = ref_thing_ptr(a);
- anode = erts_this_node;
- anum = internal_thing_ref_numbers(athing);
- alen = internal_thing_ref_no_of_numbers(athing);
- }
+ anode = erts_this_node;
+ alen = internal_ref_no_numbers(a);
+ anum = internal_ref_numbers(a);
ref_common:
CMP_NODES(anode, bnode);
@@ -3428,19 +3068,18 @@ tailrecur_ne:
ASSERT(alen == blen);
for (i = (Sint) alen - 1; i >= 0; i--)
if (anum[i] != bnum[i])
- RETURN_NEQ((Sint32) (anum[i] - bnum[i]));
+ RETURN_NEQ(anum[i] < bnum[i] ? -1 : 1);
goto pop_next;
case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
if (is_internal_ref(b)) {
- RefThing* bthing = ref_thing_ptr(b);
bnode = erts_this_node;
- bnum = internal_thing_ref_numbers(bthing);
- blen = internal_thing_ref_no_of_numbers(bthing);
+ blen = internal_ref_no_numbers(b);
+ bnum = internal_ref_numbers(b);
} else if (is_external_ref(b)) {
ExternalThing* bthing = external_thing_ptr(b);
bnode = bthing->node;
bnum = external_thing_ref_numbers(bthing);
- blen = external_thing_ref_no_of_numbers(bthing);
+ blen = external_thing_ref_no_numbers(bthing);
} else {
a_tag = EXTERNAL_REF_DEF;
goto mixed_types;
@@ -3449,7 +3088,7 @@ tailrecur_ne:
ExternalThing* athing = external_thing_ptr(a);
anode = athing->node;
anum = external_thing_ref_numbers(athing);
- alen = external_thing_ref_no_of_numbers(athing);
+ alen = external_thing_ref_no_numbers(athing);
}
goto ref_common;
default:
@@ -3469,6 +3108,9 @@ tailrecur_ne:
int cmp;
byte* a_ptr;
byte* b_ptr;
+ if (eq_only && a_size != b_size) {
+ RETURN_NEQ(a_size - b_size);
+ }
ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
@@ -3523,7 +3165,7 @@ tailrecur_ne:
if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f1.fd = signed_val(aw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f2.fd > (double) (MAX_SMALL + 1)) {
@@ -3570,7 +3212,7 @@ tailrecur_ne:
if (big_to_double(aw, &f1.fd) < 0) {
j = big_sign(aw) ? -1 : 1;
} else {
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
} else {
big = double_to_big(f2.fd, big_buf, sizeof(big_buf)/sizeof(Eterm));
@@ -3586,7 +3228,7 @@ tailrecur_ne:
if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
/* Float is within the no loss limit */
f2.fd = signed_val(bw);
- j = float_comp(f1.fd, f2.fd);
+ j = erts_float_comp(f1.fd, f2.fd);
}
#if ERTS_SIZEOF_ETERM == 8
else if (f1.fd > (double) (MAX_SMALL + 1)) {
@@ -3826,40 +3468,41 @@ not_equal:
Eterm
store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
{
+ struct erl_off_heap_header *ohhp;
Uint i;
Uint size;
- Uint *from_hp;
- Uint *to_hp = *hpp;
+ Eterm *from_hp;
+ Eterm *to_hp = *hpp;
ASSERT(is_external(ns) || is_internal_ref(ns));
- if(is_external(ns)) {
- from_hp = external_val(ns);
- size = thing_arityval(*from_hp) + 1;
- *hpp += size;
-
- for(i = 0; i < size; i++)
- to_hp[i] = from_hp[i];
-
- erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
-
- ((struct erl_off_heap_header*) to_hp)->next = oh->first;
- oh->first = (struct erl_off_heap_header*) to_hp;
-
- return make_external(to_hp);
- }
-
- /* Internal ref */
- from_hp = internal_ref_val(ns);
-
+ from_hp = boxed_val(ns);
size = thing_arityval(*from_hp) + 1;
-
*hpp += size;
for(i = 0; i < size; i++)
to_hp[i] = from_hp[i];
- return make_internal_ref(to_hp);
+ if (is_external_header(*from_hp)) {
+ ExternalThing *etp = (ExternalThing *) from_hp;
+ ASSERT(is_external(ns));
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ else if (is_ordinary_ref_thing(from_hp))
+ return make_internal_ref(to_hp);
+ else {
+ ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
+ ErtsMagicBinary *mb = mreft->mb;
+ ASSERT(is_magic_ref_thing(from_hp));
+ erts_refc_inc(&mb->intern.refc, 2);
+ OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
+ }
+
+ ohhp = (struct erl_off_heap_header*) to_hp;
+ ohhp->next = oh->first;
+ oh->first = ohhp;
+
+ return make_boxed(to_hp);
}
Eterm
@@ -3876,7 +3519,7 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
return store_external_or_ref_(&hp, &MSO(proc), ns);
}
-void bin_write(int to, void *to_arg, byte* buf, size_t sz)
+void bin_write(fmtfn_t to, void *to_arg, byte* buf, size_t sz)
{
size_t i;
@@ -3922,6 +3565,122 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -2; /* not enough space */
}
+/** @brief Fill buf with the UTF-8 encoding of the Unicode code point list
+ * @param len Max number of characters to write.
+ * @param written NULL, or out-parameter receiving the number of bytes written.
+ * @return 0 ok,
+ * -1 type error,
+ * -2 list too long, only \c len characters written
+ */
+int
+erts_unicode_list_to_buf(Eterm list, byte *buf, Sint len, Sint* written)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+ Sint val;
+ int res;
+
+ while (1) {
+ if (is_nil(list)) {
+ res = 0;
+ break;
+ }
+ if (is_not_list(list)) {
+ res = -1;
+ break;
+ }
+ listptr = list_val(list);
+
+ if (len-- <= 0) {
+ res = -2;
+ break;
+ }
+
+ if (is_not_small(CAR(listptr))) {
+ res = -1;
+ break;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ buf[sz] = val;
+ sz++;
+ } else if (val < 0x800) {
+ buf[sz+0] = 0xC0 | (val >> 6);
+ buf[sz+1] = 0x80 | (val & 0x3F);
+ sz += 2;
+ } else if (val < 0x10000UL) {
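+ /* 0xD800..0xDFFF are UTF-16 surrogate halves, never valid code points */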
+ if (0xD800 <= val && val <= 0xDFFF) {
+ res = -1;
+ break;
+ }
+ buf[sz+0] = 0xE0 | (val >> 12);
+ buf[sz+1] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+2] = 0x80 | (val & 0x3F);
+ sz += 3;
+ } else if (val < 0x110000) {
+ buf[sz+0] = 0xF0 | (val >> 18);
+ buf[sz+1] = 0x80 | ((val >> 12) & 0x3F);
+ buf[sz+2] = 0x80 | ((val >> 6) & 0x3F);
+ buf[sz+3] = 0x80 | (val & 0x3F);
+ sz += 4;
+ } else {
+ res = -1;
+ break;
+ }
+ list = CDR(listptr);
+ }
+
+ if (written)
+ *written = sz;
+ return res;
+}
+
+Sint
+erts_unicode_list_to_buf_len(Eterm list)
+{
+ Eterm* listptr;
+ Sint sz = 0;
+
+ if (is_nil(list)) {
+ return 0;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+
+ while (1) {
+ Sint val;
+
+ if (is_not_small(CAR(listptr))) {
+ return -1;
+ }
+ val = signed_val(CAR(listptr));
+ if (0 <= val && val < 0x80) {
+ sz++;
+ } else if (val < 0x800) {
+ sz += 2;
+ } else if (val < 0x10000UL) {
+ if (0xD800 <= val && val <= 0xDFFF) {
+ return -1;
+ }
+ sz += 3;
+ } else if (val < 0x110000) {
+ sz += 4;
+ } else {
+ return -1;
+ }
+ list = CDR(listptr);
+ if (is_nil(list)) {
+ return sz;
+ }
+ if (is_not_list(list)) {
+ return -1;
+ }
+ listptr = list_val(list);
+ }
+}
+
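A minimal usage sketch of the two helpers above (the allocator class and the surrounding error handling are illustrative, not part of this patch): compute the exact byte count first, then encode into a buffer of that size.

    Sint bytes = erts_unicode_list_to_buf_len(list);
    if (bytes >= 0) {
        byte *buf = erts_alloc(ERTS_ALC_T_TMP, bytes);
        Sint written = 0;
        /* 'bytes' >= number of list elements, so passing it as the
         * character limit can never truncate the encoding */
        int res = erts_unicode_list_to_buf(list, buf, bytes, &written);
        ASSERT(res == 0 && written == bytes);
        (void) res;
        /* ... use buf[0..written-1] ... */
        erts_free(ERTS_ALC_T_TMP, buf);
    }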
/*
** Convert an integer to a byte list
** return pointer to converted stuff (need not be at start of buf!)
@@ -4560,15 +4319,20 @@ erts_read_env(char *key)
char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
int res;
while (1) {
- res = erts_sys_getenv_raw(key, value, &value_len);
- if (res <= 0)
- break;
- value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
+ res = erts_sys_explicit_8bit_getenv(key, value, &value_len);
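+ /* inferred convention: negative means the buffer was too small
+  * (value_len has been updated), 1 means found, other non-negative
+  * values mean the variable is absent */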
+
+ if (res >= 0) {
+ break;
+ }
+
+ value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
}
- if (res != 0) {
- erts_free(ERTS_ALC_T_TMP, value);
- return NULL;
+
+ if (res != 1) {
+ erts_free(ERTS_ALC_T_TMP, value);
+ return NULL;
}
+
return value;
}
@@ -4882,22 +4646,6 @@ void
erts_interval_init(erts_interval_t *icp)
{
erts_atomic64_init_nob(&icp->counter.atomic, 0);
-#ifdef DEBUG
- icp->smp_api = 0;
-#endif
-}
-
-void
-erts_smp_interval_init(erts_interval_t *icp)
-{
-#ifdef ERTS_SMP
- erts_interval_init(icp);
-#else
- icp->counter.not_atomic = 0;
-#endif
-#ifdef DEBUG
- icp->smp_api = 1;
-#endif
}
static ERTS_INLINE Uint64
@@ -4937,81 +4685,27 @@ ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
Uint64
erts_step_interval_nob(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
return step_interval_nob(icp);
}
Uint64
erts_step_interval_relb(erts_interval_t *icp)
{
- ASSERT(!icp->smp_api);
return step_interval_relb(icp);
}
Uint64
-erts_smp_step_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return step_interval_nob(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
-erts_smp_step_interval_relb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return step_interval_relb(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
- ASSERT(!icp->smp_api);
return ensure_later_interval_nob(icp, ic);
}
Uint64
erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
- ASSERT(!icp->smp_api);
return ensure_later_interval_acqb(icp, ic);
}
-Uint64
-erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return ensure_later_interval_nob(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
-}
-
-Uint64
-erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return ensure_later_interval_acqb(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
-}
-
/*
* A millisecond timestamp without time correction where there's no hrtime
* - for tracing on "long" things...
@@ -5030,6 +4724,53 @@ Uint64 erts_timestamp_millis(void)
#endif
}
+void *
+erts_calc_stacklimit(char *prev_c, UWord stacksize)
+{
+ /*
+ * We *don't* want this function inlined; calling it from
+ * another function in utils.c is risky, since the compiler
+ * could then inline it and defeat the purpose.
+ */
+
+ UWord pagesize = erts_sys_get_page_size();
+ char c;
+ char *start;
+ if (&c > prev_c) {
+ start = (char *) ((((UWord) prev_c) / pagesize) * pagesize);
+ return (void *) (start + stacksize);
+ }
+ else {
+ start = (char *) (((((UWord) prev_c) - 1) / pagesize + 1) * pagesize);
+ return (void *) (start - stacksize);
+ }
+}
+
+/*
+ * erts_check_below_limit() and
+ * erts_check_above_limit() are put
+ * in utils.c in order to prevent
+ * inlining.
+ */
+
+int
+erts_check_below_limit(char *ptr, char *limit)
+{
+ return ptr < limit;
+}
+
+int
+erts_check_above_limit(char *ptr, char *limit)
+{
+ return ptr > limit;
+}
+
+void *
+erts_ptr_id(void *ptr)
+{
+ return ptr;
+}
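A hedged sketch of how these helpers combine into a stack guard; the 8 MB size, the downward-growing stack, and all names below are illustrative assumptions, not part of this patch:

    static char *stack_limit;

    static void thread_entry(char *base)
    {
        /* 'base' is the address of a local in the thread's outermost frame */
        stack_limit = (char *) erts_calc_stacklimit(base, 8 * 1024 * 1024);
    }

    static int stack_limit_exceeded(void)
    {
        char c;
        /* erts_ptr_id() keeps the compiler from reasoning about &c;
         * on a downward-growing stack the limit lies below the base */
        return erts_check_below_limit((char *) erts_ptr_id(&c), stack_limit);
    }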
+
#ifdef DEBUG
/*
* Handy functions when using a debugger - don't use in the code!